diff --git a/community_tasks/_template.py b/community_tasks/_template.py index 2db28e340..bfc7de505 100644 --- a/community_tasks/_template.py +++ b/community_tasks/_template.py @@ -30,13 +30,10 @@ """ import numpy as np -from aenum import extend_enum -from lighteval.metrics.metrics import Metrics, SampleLevelMetric -from lighteval.metrics.utils.metric_utils import MetricCategory, MetricUseCase -from lighteval.tasks.default_prompts import LETTER_INDICES +from lighteval.metrics.metrics import SampleLevelMetric from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod # DEFINE YOUR PROMPT FUNCTIONS @@ -49,7 +46,7 @@ def prompt_fn(line, task_name: str = None): return Doc( task_name=task_name, query="", - choices="", + choices=[""], gold_index=0, instruction="", ) @@ -68,7 +65,7 @@ def prompt_fn(line, task_name: str = None): evaluation_splits=[], few_shots_split="", few_shots_select="", - metric=[], # select your metric in Metrics + metrics=[], # select your metric in Metrics ) # EVALS WITH SUBSET @@ -91,7 +88,7 @@ def __init__( hf_subset=hf_subset, prompt_function=prompt_fn, # must be defined in the file or imported from src/lighteval/tasks/tasks_prompt_formatting.py hf_repo="", - metric=[custom_metric], # select your metric in Metrics or use your custom_metric + metrics=[custom_metric], # select your metric in Metrics or use your custom_metric hf_avail_splits=[], evaluation_splits=[], few_shots_split="", @@ -111,8 +108,7 @@ def __init__( custom_metric = SampleLevelMetric( metric_name="my_custom_metric_name", higher_is_better=True, - category=MetricCategory.IGNORED, - use_case=MetricUseCase.NONE, + category=SamplingMethod.GENERATIVE, # or LOGPROBS, PERPLEXITY, etc. 
sample_level_fn=lambda x: x, # how to compute score for one sample corpus_level_fn=np.mean, # aggregation ) diff --git a/community_tasks/arabic_evals.py b/community_tasks/arabic_evals.py index 55165074a..1d036fa83 100644 --- a/community_tasks/arabic_evals.py +++ b/community_tasks/arabic_evals.py @@ -32,11 +32,10 @@ from typing import Any, Dict, List, Optional, Union from lighteval.metrics.llm_as_judge import JudgeLM -from lighteval.metrics.metrics import Metric, MetricCategory, Metrics -from lighteval.metrics.utils.metric_utils import MetricUseCase +from lighteval.metrics.metrics import Metric, Metrics from lighteval.tasks.default_prompts import LETTER_INDICES from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod # fmt: off @@ -104,7 +103,7 @@ def __init__( hf_subset=hf_subset, prompt_function=arabic_mmlu_pfn, hf_repo="MBZUAI/ArabicMMLU", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test"], evaluation_splits=["test"], few_shots_split=["dev"], @@ -166,7 +165,7 @@ def __init__( hf_subset=hf_subset, prompt_function=arabic_mmlu_ht_pfn, hf_repo="MBZUAI/human_translated_arabic_mmlu", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test"], evaluation_splits=["test"], few_shots_split=None, @@ -231,7 +230,7 @@ def __init__( hf_subset=hf_subset, prompt_function=arabic_mmlu_mt_pfn, hf_repo="OALL/Arabic_MMLU", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test", "dev"], evaluation_splits=["test"], few_shots_split="dev", @@ -287,7 +286,7 @@ def __init__( hf_subset=hf_subset, prompt_function=acva_pfn, hf_repo="OALL/ACVA", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test", "validation"], evaluation_splits=["test"], few_shots_split="validation", @@ -344,7 +343,7 @@ def __init__( hf_subset=hf_subset, prompt_function=aratrust_pfn, hf_repo="asas-ai/AraTrust-categorized", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["train"], evaluation_splits=["train"], few_shots_split=None, @@ -393,7 +392,7 @@ def arabic_exams_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -444,7 +443,7 @@ def __init__( hf_subset=hf_subset, prompt_function=alghafa_pfn, hf_repo="OALL/AlGhafa-Arabic-LLM-Benchmark-Native", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test", "validation"], evaluation_splits=["test"], few_shots_split="validation", @@ -471,7 +470,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -488,7 +487,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -505,7 +504,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + 
metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -522,7 +521,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -539,7 +538,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -556,7 +555,7 @@ def __init__( evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -594,7 +593,7 @@ def boolq_arabic_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -629,7 +628,7 @@ def copa_arabic_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -673,7 +672,7 @@ def hellaswag_arabic_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -710,7 +709,7 @@ def toxigen_arabic_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -761,7 +760,7 @@ def sciq_arabic_pfn(line, task_name: str = None): evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], trust_dataset=True, version=0, ) @@ -819,7 +818,7 @@ def __init__( hf_subset=hf_subset, prompt_function=madinah_qa_pfn, hf_repo="MBZUAI/MadinahQA", - metric=[Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc_norm], hf_avail_splits=["test"], evaluation_splits=["test"], few_shots_split=["dev"], @@ -849,11 +848,10 @@ def __init__(self, judge: JudgeLM): """ self.judge = judge self.metric_name = "llm_as_judge" - self.category = MetricCategory.LLM_AS_JUDGE + self.category = SamplingMethod.GENERATIVE self.corpus_level_fn = self.aggregate_scores self.sample_level_fn = self._sample_level_fn self.higher_is_better = True # Fixed tuple syntax - self.use_case = MetricUseCase.NONE def compute(self, responses: list[str], formatted_docs: list[Doc], **kwargs) -> dict[str, float]: """ @@ -1039,7 +1037,7 @@ def process_judge_response(response) -> float: hf_subset=None, hf_avail_splits=["train"], evaluation_splits=["train"], - metric=[wrapped_judge], + metrics=[wrapped_judge], trust_dataset=True, generation_size=200, stop_sequence=[], diff --git a/community_tasks/french_evals.py b/community_tasks/french_evals.py index 607d28eb8..75185113d 100644 --- a/community_tasks/french_evals.py +++ b/community_tasks/french_evals.py @@ -32,16 +32,7 @@ import random -import numpy as np -from aenum import extend_enum - -import 
lighteval.tasks.extended.ifeval.instructions_registry as instructions_registry -from lighteval.metrics.metrics import Metrics, SampleLevelMetric -from lighteval.metrics.utils.metric_utils import ( - MetricCategory, - MetricUseCase, - SampleLevelMetricGrouping, -) +from lighteval.metrics.metrics import Metrics from lighteval.tasks.default_prompts import LETTER_INDICES from lighteval.tasks.extended.ifeval.main import ifeval_metrics from lighteval.tasks.lighteval_task import LightevalTaskConfig @@ -106,7 +97,7 @@ def prompt_bac_fr(line, task_name: str = None): suite=["community"], hf_repo="fr-gouv-coordination-ia/IFEval-fr", hf_subset="default", - metric=[ifeval_metrics], + metrics=[ifeval_metrics], hf_avail_splits=["train"], evaluation_splits=["train"], few_shots_split="train", @@ -128,7 +119,7 @@ def prompt_bac_fr(line, task_name: str = None): few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -146,7 +137,7 @@ def prompt_bac_fr(line, task_name: str = None): few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.quasi_exact_match_math, Metrics.exact_match], + metrics=[Metrics.quasi_exact_match_math, Metrics.exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 138c2daf2..377f2107d 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -41,9 +41,11 @@ - local: package_reference/evaluation_tracker title: EvaluationTracker - local: package_reference/models - title: Models and ModelConfigs + title: Model Configs - local: package_reference/pipeline title: Pipeline + - local: package_reference/models_outputs + title: Model's Output title: Main classes - local: package_reference/metrics title: Metrics diff --git a/docs/source/adding-a-custom-task.mdx b/docs/source/adding-a-custom-task.mdx index 312320c08..ae4076203 100644 --- a/docs/source/adding-a-custom-task.mdx +++ b/docs/source/adding-a-custom-task.mdx @@ -41,7 +41,6 @@ def prompt_fn(line, task_name: str = None): query=line["question"], choices=[f" {c}" for c in line["choices"]], gold_index=line["gold"], - instruction="", ) ``` @@ -53,8 +52,7 @@ in [`lighteval.metrics.metrics.Metrics`]) or [create a custom one](adding-a-new- custom_metric = SampleLevelMetric( metric_name="my_custom_metric_name", higher_is_better=True, - category=MetricCategory.IGNORED, - use_case=MetricUseCase.NONE, + category=SamplingMethod.{GENERATIVE,LOGPROBS}, sample_level_fn=lambda x: x, # how to compute score for one sample corpus_level_fn=np.mean, # How to aggregate the samples metrics ) @@ -77,7 +75,7 @@ task = LightevalTaskConfig( evaluation_splits=[], few_shots_split=None, few_shots_select=None, - metric=[], # select your metric in Metrics + metrics=[], # select your metric in Metrics ) ``` @@ -111,50 +109,6 @@ class CustomSubsetTask(LightevalTaskConfig): SUBSET_TASKS = [CustomSubsetTask(name=f"mytask:{subset}", hf_subset=subset) for subset in SAMPLE_SUBSETS] ``` -Here is a list of the parameters and their meaning: - -- `name` (str), your evaluation name -- `suite` (list), the suite(s) to which your evaluation should belong. This - field allows us to compare different task implementations and is used as a - task selection to differentiate the versions to launch. 
At the moment, you'll - find the keywords ["helm", "bigbench", "original", "lighteval", "community", - "custom"]; for core evals, please choose `lighteval`. -- `prompt_function` (Callable), the prompt function you defined in the step - above -- `hf_repo` (str), the path to your evaluation dataset on the hub -- `hf_subset` (str), the specific subset you want to use for your evaluation - (note: when the dataset has no subset, fill this field with `"default"`, not - with `None` or `""`) -- `hf_avail_splits` (list), all the splits available for your dataset (train, - valid or validation, test, other...) -- `evaluation_splits` (list), the splits you want to use for evaluation -- `few_shots_split` (str, can be `null`), the specific split from which you - want to select samples for your few-shot examples. It should be different - from the sets included in `evaluation_splits` -- `few_shots_select` (str, can be `null`), the method that you will use to - select items for your few-shot examples. Can be `null`, or one of: - - `balanced` select examples from the `few_shots_split` with balanced - labels, to avoid skewing the few shot examples (hence the model - generations) toward one specific label - - `random` selects examples at random from the `few_shots_split` - - `random_sampling` selects new examples at random from the - `few_shots_split` for every new item, but if a sampled item is equal to - the current one, it is removed from the available samples - - `random_sampling_from_train` selects new examples at random from the - `few_shots_split` for every new item, but if a sampled item is equal to - the current one, it is kept! Only use this if you know what you are - doing. - - `sequential` selects the first `n` examples of the `few_shots_split` -- `generation_size` (int), the maximum number of tokens allowed for a - generative evaluation. If your evaluation is a log likelihood evaluation - (multi-choice), this value should be -1 -- `stop_sequence` (list), a list of strings acting as end of sentence tokens - for your generation -- `metric` (list), the metrics you want to use for your evaluation (see next - section for a detailed explanation) -- `trust_dataset` (bool), set to True if you trust the dataset. - - Then you need to add your task to the `TASKS_TABLE` list. ```python diff --git a/docs/source/adding-a-new-metric.mdx b/docs/source/adding-a-new-metric.mdx index 6433d5883..970298a9c 100644 --- a/docs/source/adding-a-new-metric.mdx +++ b/docs/source/adding-a-new-metric.mdx @@ -15,7 +15,6 @@ If not, you can use the `custom_task` system to register your new metric: > to install the required dev dependencies by running `pip install -e .[dev]` > and then run `pre-commit install` to install the pre-commit hooks. - - Create a new Python file which should contain the full logic of your metric. - The file also needs to start with these imports ```python from aenum import extend_enum from lighteval.metrics import Metrics ``` -You need to define a sample level metric: +You need to define a sample level metric. All sample level metrics have the same signature, taking a +[`~lighteval.types.Doc`] and a [`~lighteval.types.ModelResponse`]. The metric should return a float or a +boolean.
```python -def custom_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> bool: - response = predictions[0] - return response == formatted_doc.choices[formatted_doc.gold_index] +def custom_metric(doc: Doc, model_response: ModelResponse) -> bool: + response = model_response.text[0] + return response == doc.choices[doc.gold_index] ``` Here the sample level metric only returns one metric, if you want to return multiple metrics per sample you need to return a dictionary with the metrics as keys and the values as values. ```python -def custom_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> dict: - response = predictions[0] - return {"accuracy": response == formatted_doc.choices[formatted_doc.gold_index], "other_metric": 0.5} +def custom_metric(doc: Doc, model_response: ModelResponse) -> dict: + response = model_response.text[0] + return {"accuracy": response == doc.choices[doc.gold_index], "other_metric": 0.5} ``` Then, you can define an aggregation function if needed, a common aggregation function is `np.mean`. @@ -56,8 +57,7 @@ with [`~metrics.utils.metric_utils.SampleLevelMetric`]: my_custom_metric = SampleLevelMetric( metric_name={custom_metric_name}, higher_is_better={either True or False}, - category={MetricCategory}, - use_case={MetricUseCase}, + category={SamplingMethod}, sample_level_fn=custom_metric, corpus_level_fn=agg_function, ) @@ -70,8 +70,7 @@ with [`~metrics.utils.metric_utils.SampleLevelMetricGrouping`]: custom_metric = SampleLevelMetricGrouping( metric_name={submetric_names}, higher_is_better={n: {True or False} for n in submetric_names}, - category={MetricCategory}, - use_case={MetricUseCase}, + category={SamplingMethod}, sample_level_fn=custom_metric, corpus_level_fn={ "accuracy": np.mean, diff --git a/docs/source/available-tasks.mdx b/docs/source/available-tasks.mdx index f340fabbd..5b80c082e 100644 --- a/docs/source/available-tasks.mdx +++ b/docs/source/available-tasks.mdx @@ -11,1246 +11,3 @@ You can also inspect a specific task by running: ```bash lighteval tasks inspect ``` - -## List of tasks - -- bigbench: - - bigbench|abstract_narrative_understanding - - bigbench|anachronisms - - bigbench|analogical_similarity - - bigbench|analytic_entailment - - bigbench|arithmetic_bb - - bigbench|ascii_word_recognition - - bigbench|authorship_verification - - bigbench|auto_categorization - - bigbench|auto_debugging - - bigbench|bbq_lite_json - - bigbench|bridging_anaphora_resolution_barqa - - bigbench|causal_judgment - - bigbench|cause_and_effect - - bigbench|checkmate_in_one - - bigbench|chess_state_tracking - - bigbench|chinese_remainder_theorem - - bigbench|cifar10_classification - - bigbench|code_line_description - - bigbench|codenames - - bigbench|color - - bigbench|common_morpheme - - bigbench|conceptual_combinations - - bigbench|conlang_translation - - bigbench|contextual_parametric_knowledge_conflicts - - bigbench|coqa_bb - - bigbench|crash_blossom - - bigbench|crass_ai - - bigbench|cryobiology_spanish - - bigbench|cryptonite - - bigbench|cs_algorithms - - bigbench|dark_humor_detection - - bigbench|date_understanding - - bigbench|disambiguation_qa - - bigbench|discourse_marker_prediction - - bigbench|disfl_qa - - bigbench|dyck_languages - - bigbench|elementary_math_qa - - bigbench|emoji_movie - - bigbench|emojis_emotion_prediction - - bigbench|empirical_judgments - - bigbench|english_proverbs - - bigbench|english_russian_proverbs - - bigbench|entailed_polarity - - bigbench|entailed_polarity_hindi - - bigbench|epistemic_reasoning - - 
bigbench|evaluating_information_essentiality - - bigbench|fact_checker - - bigbench|fantasy_reasoning - - bigbench|few_shot_nlg - - bigbench|figure_of_speech_detection - - bigbench|formal_fallacies_syllogisms_negation - - bigbench|gem - - bigbench|gender_inclusive_sentences_german - - bigbench|general_knowledge - - bigbench|geometric_shapes - - bigbench|goal_step_wikihow - - bigbench|gre_reading_comprehension - - bigbench|hhh_alignment - - bigbench|hindi_question_answering - - bigbench|hindu_knowledge - - bigbench|hinglish_toxicity - - bigbench|human_organs_senses - - bigbench|hyperbaton - - bigbench|identify_math_theorems - - bigbench|identify_odd_metaphor - - bigbench|implicatures - - bigbench|implicit_relations - - bigbench|intent_recognition - - bigbench|international_phonetic_alphabet_nli - - bigbench|international_phonetic_alphabet_transliterate - - bigbench|intersect_geometry - - bigbench|irony_identification - - bigbench|kanji_ascii - - bigbench|kannada - - bigbench|key_value_maps - - bigbench|known_unknowns - - bigbench|language_games - - bigbench|language_identification - - bigbench|linguistic_mappings - - bigbench|linguistics_puzzles - - bigbench|logic_grid_puzzle - - bigbench|logical_args - - bigbench|logical_deduction - - bigbench|logical_fallacy_detection - - bigbench|logical_sequence - - bigbench|mathematical_induction - - bigbench|matrixshapes - - bigbench|metaphor_boolean - - bigbench|metaphor_understanding - - bigbench|minute_mysteries_qa - - bigbench|misconceptions - - bigbench|misconceptions_russian - - bigbench|mnist_ascii - - bigbench|modified_arithmetic - - bigbench|moral_permissibility - - bigbench|movie_dialog_same_or_different - - bigbench|movie_recommendation - - bigbench|mult_data_wrangling - - bigbench|multiemo - - bigbench|natural_instructions - - bigbench|navigate - - bigbench|nonsense_words_grammar - - bigbench|novel_concepts - - bigbench|object_counting - - bigbench|odd_one_out - - bigbench|operators - - bigbench|paragraph_segmentation - - bigbench|parsinlu_qa - - bigbench|parsinlu_reading_comprehension - - bigbench|penguins_in_a_table - - bigbench|periodic_elements - - bigbench|persian_idioms - - bigbench|phrase_relatedness - - bigbench|physical_intuition - - bigbench|physics - - bigbench|physics_questions - - bigbench|play_dialog_same_or_different - - bigbench|polish_sequence_labeling - - bigbench|presuppositions_as_nli - - bigbench|qa_wikidata - - bigbench|question_selection - - bigbench|real_or_fake_text - - bigbench|reasoning_about_colored_objects - - bigbench|repeat_copy_logic - - bigbench|rephrase - - bigbench|rhyming - - bigbench|riddle_sense - - bigbench|ruin_names - - bigbench|salient_translation_error_detection - - bigbench|scientific_press_release - - bigbench|semantic_parsing_in_context_sparc - - bigbench|semantic_parsing_spider - - bigbench|sentence_ambiguity - - bigbench|similarities_abstraction - - bigbench|simp_turing_concept - - bigbench|simple_arithmetic_json - - bigbench|simple_arithmetic_json_multiple_choice - - bigbench|simple_arithmetic_json_subtasks - - bigbench|simple_arithmetic_multiple_targets_json - - bigbench|simple_ethical_questions - - bigbench|simple_text_editing - - bigbench|snarks - - bigbench|social_iqa - - bigbench|social_support - - bigbench|sports_understanding - - bigbench|strange_stories - - bigbench|strategyqa - - bigbench|sufficient_information - - bigbench|suicide_risk - - bigbench|swahili_english_proverbs - - bigbench|swedish_to_german_proverbs - - bigbench|symbol_interpretation - - bigbench|tellmewhy - - 
bigbench|temporal_sequences - - bigbench|tense - - bigbench|timedial - - bigbench|topical_chat - - bigbench|tracking_shuffled_objects - - bigbench|understanding_fables - - bigbench|undo_permutation - - bigbench|unit_conversion - - bigbench|unit_interpretation - - bigbench|unnatural_in_context_learning - - bigbench|vitaminc_fact_verification - - bigbench|what_is_the_tao - - bigbench|which_wiki_edit - - bigbench|wino_x_german - - bigbench|winowhy - - bigbench|word_sorting - - bigbench|word_unscrambling - -- harness: - - harness|bbh:boolean_expressions - - harness|bbh:causal_judgment - - harness|bbh:date_understanding - - harness|bbh:disambiguation_qa - - harness|bbh:dyck_languages - - harness|bbh:formal_fallacies - - harness|bbh:geometric_shapes - - harness|bbh:hyperbaton - - harness|bbh:logical_deduction_five_objects - - harness|bbh:logical_deduction_seven_objects - - harness|bbh:logical_deduction_three_objects - - harness|bbh:movie_recommendation - - harness|bbh:multistep_arithmetic_two - - harness|bbh:navigate - - harness|bbh:object_counting - - harness|bbh:penguins_in_a_table - - harness|bbh:reasoning_about_colored_objects - - harness|bbh:ruin_names - - harness|bbh:salient_translation_error_detection - - harness|bbh:snarks - - harness|bbh:sports_understanding - - harness|bbh:temporal_sequences - - harness|bbh:tracking_shuffled_objects_five_objects - - harness|bbh:tracking_shuffled_objects_seven_objects - - harness|bbh:tracking_shuffled_objects_three_objects - - harness|bbh:web_of_lies - - harness|bbh:word_sorting - - harness|bigbench:causal_judgment - - harness|bigbench:date_understanding - - harness|bigbench:disambiguation_qa - - harness|bigbench:geometric_shapes - - harness|bigbench:logical_deduction_five_objects - - harness|bigbench:logical_deduction_seven_objects - - harness|bigbench:logical_deduction_three_objects - - harness|bigbench:movie_recommendation - - harness|bigbench:navigate - - harness|bigbench:reasoning_about_colored_objects - - harness|bigbench:ruin_names - - harness|bigbench:salient_translation_error_detection - - harness|bigbench:snarks - - harness|bigbench:sports_understanding - - harness|bigbench:temporal_sequences - - harness|bigbench:tracking_shuffled_objects_five_objects - - harness|bigbench:tracking_shuffled_objects_seven_objects - - harness|bigbench:tracking_shuffled_objects_three_objects - - harness|wikitext:103:document_level - -- helm: - - helm|babi_qa - - helm|bbq - - helm|bbq:Age - - helm|bbq:Disability_status - - helm|bbq:Gender_identity - - helm|bbq:Physical_appearance - - helm|bbq:Race_ethnicity - - helm|bbq:Race_x_SES - - helm|bbq:Race_x_gender - - helm|bbq:Religion - - helm|bbq:SES - - helm|bbq:Sexual_orientation - - helm|bbq=Nationality - - helm|bigbench:auto_debugging - - helm|bigbench:bbq_lite_json:age_ambig - - helm|bigbench:bbq_lite_json:age_disambig - - helm|bigbench:bbq_lite_json:disability_status_ambig - - helm|bigbench:bbq_lite_json:disability_status_disambig - - helm|bigbench:bbq_lite_json:gender_identity_ambig - - helm|bigbench:bbq_lite_json:gender_identity_disambig - - helm|bigbench:bbq_lite_json:nationality_ambig - - helm|bigbench:bbq_lite_json:nationality_disambig - - helm|bigbench:bbq_lite_json:physical_appearance_ambig - - helm|bigbench:bbq_lite_json:physical_appearance_disambig - - helm|bigbench:bbq_lite_json:race_ethnicity_ambig - - helm|bigbench:bbq_lite_json:race_ethnicity_disambig - - helm|bigbench:bbq_lite_json:religion_ambig - - helm|bigbench:bbq_lite_json:religion_disambig - - helm|bigbench:bbq_lite_json:ses_ambig - - 
helm|bigbench:bbq_lite_json:ses_disambig - - helm|bigbench:bbq_lite_json:sexual_orientation_ambig - - helm|bigbench:bbq_lite_json:sexual_orientation_disambig - - helm|bigbench:code_line_description - - helm|bigbench:conceptual_combinations:contradictions - - helm|bigbench:conceptual_combinations:emergent_properties - - helm|bigbench:conceptual_combinations:fanciful_fictional_combinations - - helm|bigbench:conceptual_combinations:homonyms - - helm|bigbench:conceptual_combinations:invented_words - - helm|bigbench:conlang_translation:adna_from - - helm|bigbench:conlang_translation:adna_to - - helm|bigbench:conlang_translation:atikampe_from - - helm|bigbench:conlang_translation:atikampe_to - - helm|bigbench:conlang_translation:gornam_from - - helm|bigbench:conlang_translation:gornam_to - - helm|bigbench:conlang_translation:holuan_from - - helm|bigbench:conlang_translation:holuan_to - - helm|bigbench:conlang_translation:mkafala_from - - helm|bigbench:conlang_translation:mkafala_to - - helm|bigbench:conlang_translation:postpositive_english_from - - helm|bigbench:conlang_translation:postpositive_english_to - - helm|bigbench:conlang_translation:unapuri_from - - helm|bigbench:conlang_translation:unapuri_to - - helm|bigbench:conlang_translation:vaomi_from - - helm|bigbench:conlang_translation:vaomi_to - - helm|bigbench:emoji_movie - - helm|bigbench:formal_fallacies_syllogisms_negation - - helm|bigbench:hindu_knowledge - - helm|bigbench:known_unknowns - - helm|bigbench:language_identification - - helm|bigbench:linguistics_puzzles - - helm|bigbench:logic_grid_puzzle - - helm|bigbench:logical_deduction-five_objects - - helm|bigbench:logical_deduction-seven_objects - - helm|bigbench:logical_deduction-three_objects - - helm|bigbench:misconceptions_russian - - helm|bigbench:novel_concepts - - helm|bigbench:operators - - helm|bigbench:parsinlu_reading_comprehension - - helm|bigbench:play_dialog_same_or_different - - helm|bigbench:repeat_copy_logic - - helm|bigbench:strange_stories-boolean - - helm|bigbench:strange_stories-multiple_choice - - helm|bigbench:strategyqa - - helm|bigbench:symbol_interpretation-adversarial - - helm|bigbench:symbol_interpretation-emoji_agnostic - - helm|bigbench:symbol_interpretation-name_agnostic - - helm|bigbench:symbol_interpretation-plain - - helm|bigbench:symbol_interpretation-tricky - - helm|bigbench:vitaminc_fact_verification - - helm|bigbench:winowhy - - helm|blimp:adjunct_island - - helm|blimp:anaphor_gender_agreement - - helm|blimp:anaphor_number_agreement - - helm|blimp:animate_subject_passive - - helm|blimp:animate_subject_trans - - helm|blimp:causative - - helm|blimp:complex_NP_island - - helm|blimp:coordinate_structure_constraint_complex_left_branch - - helm|blimp:coordinate_structure_constraint_object_extraction - - helm|blimp:determiner_noun_agreement_1 - - helm|blimp:determiner_noun_agreement_2 - - helm|blimp:determiner_noun_agreement_irregular_1 - - helm|blimp:determiner_noun_agreement_irregular_2 - - helm|blimp:determiner_noun_agreement_with_adj_2 - - helm|blimp:determiner_noun_agreement_with_adj_irregular_1 - - helm|blimp:determiner_noun_agreement_with_adj_irregular_2 - - helm|blimp:determiner_noun_agreement_with_adjective_1 - - helm|blimp:distractor_agreement_relational_noun - - helm|blimp:distractor_agreement_relative_clause - - helm|blimp:drop_argument - - helm|blimp:ellipsis_n_bar_1 - - helm|blimp:ellipsis_n_bar_2 - - helm|blimp:existential_there_object_raising - - helm|blimp:existential_there_quantifiers_1 - - helm|blimp:existential_there_quantifiers_2 - 
- helm|blimp:existential_there_subject_raising - - helm|blimp:expletive_it_object_raising - - helm|blimp:inchoative - - helm|blimp:intransitive - - helm|blimp:irregular_past_participle_adjectives - - helm|blimp:irregular_past_participle_verbs - - helm|blimp:irregular_plural_subject_verb_agreement_1 - - helm|blimp:irregular_plural_subject_verb_agreement_2 - - helm|blimp:left_branch_island_echo_question - - helm|blimp:left_branch_island_simple_question - - helm|blimp:matrix_question_npi_licensor_present - - helm|blimp:npi_present_1 - - helm|blimp:npi_present_2 - - helm|blimp:only_npi_licensor_present - - helm|blimp:only_npi_scope - - helm|blimp:passive_1 - - helm|blimp:passive_2 - - helm|blimp:principle_A_c_command - - helm|blimp:principle_A_case_1 - - helm|blimp:principle_A_case_2 - - helm|blimp:principle_A_domain_1 - - helm|blimp:principle_A_domain_2 - - helm|blimp:principle_A_domain_3 - - helm|blimp:principle_A_reconstruction - - helm|blimp:regular_plural_subject_verb_agreement_1 - - helm|blimp:regular_plural_subject_verb_agreement_2 - - helm|blimp:sentential_negation_npi_licensor_present - - helm|blimp:sentential_negation_npi_scope - - helm|blimp:sentential_subject_island - - helm|blimp:superlative_quantifiers_1 - - helm|blimp:superlative_quantifiers_2 - - helm|blimp:tough_vs_raising_1 - - helm|blimp:tough_vs_raising_2 - - helm|blimp:transitive - - helm|blimp:wh_island - - helm|blimp:wh_questions_object_gap - - helm|blimp:wh_questions_subject_gap - - helm|blimp:wh_questions_subject_gap_long_distance - - helm|blimp:wh_vs_that_no_gap - - helm|blimp:wh_vs_that_no_gap_long_distance - - helm|blimp:wh_vs_that_with_gap - - helm|blimp:wh_vs_that_with_gap_long_distance - - helm|bold - - helm|bold:gender - - helm|bold:political_ideology - - helm|bold:profession - - helm|bold:race - - helm|bold:religious_ideology - - helm|boolq - - helm|boolq:contrastset - - helm|civil_comments - - helm|civil_comments:LGBTQ - - helm|civil_comments:black - - helm|civil_comments:christian - - helm|civil_comments:female - - helm|civil_comments:male - - helm|civil_comments:muslim - - helm|civil_comments:other_religions - - helm|civil_comments:white - - helm|commonsenseqa - - helm|copyright:n_books_1000-extractions_per_book_1-prefix_length_125 - - helm|copyright:n_books_1000-extractions_per_book_1-prefix_length_25 - - helm|copyright:n_books_1000-extractions_per_book_1-prefix_length_5 - - helm|copyright:n_books_1000-extractions_per_book_3-prefix_length_125 - - helm|copyright:n_books_1000-extractions_per_book_3-prefix_length_25 - - helm|copyright:n_books_1000-extractions_per_book_3-prefix_length_5 - - helm|copyright:oh_the_places - - helm|copyright:pilot - - helm|copyright:popular_books-prefix_length_10 - - helm|copyright:popular_books-prefix_length_125 - - helm|copyright:popular_books-prefix_length_25 - - helm|copyright:popular_books-prefix_length_250 - - helm|copyright:popular_books-prefix_length_5 - - helm|copyright:popular_books-prefix_length_50 - - helm|copyright:prompt_num_line_1-min_lines_20 - - helm|copyright:prompt_num_line_10-min_lines_20 - - helm|copyright:prompt_num_line_5-min_lines_20 - - helm|covid_dialogue - - helm|dyck_language:2 - - helm|dyck_language:3 - - helm|dyck_language:4 - - helm|entity_data_imputation:Buy - - helm|entity_data_imputation:Restaurant - - helm|entity_matching:Abt_Buy - - helm|entity_matching:Amazon_Google - - helm|entity_matching:Beer - - helm|entity_matching:Company - - helm|entity_matching:DBLP_ACM - - helm|entity_matching:DBLP_GoogleScholar - - helm|entity_matching:Dirty_DBLP_ACM - 
- helm|entity_matching:Dirty_DBLP_GoogleScholar - - helm|entity_matching:Dirty_Walmart_Amazon - - helm|entity_matching:Dirty_iTunes_Amazon - - helm|entity_matching:Walmart_Amazon - - helm|entity_matching:iTunes_Amazon - - helm|entity_matching=Fodors_Zagats - - helm|hellaswag - - helm|imdb - - helm|imdb:contrastset - - helm|interactive_qa_mmlu:abstract_algebra - - helm|interactive_qa_mmlu:college_chemistry - - helm|interactive_qa_mmlu:global_facts - - helm|interactive_qa_mmlu:miscellaneous - - helm|interactive_qa_mmlu:nutrition - - helm|interactive_qa_mmlu:us_foreign_policy - - helm|legal_summarization:billsum - - helm|legal_summarization:eurlexsum - - helm|legal_summarization:multilexsum - - helm|legalsupport - - helm|lexglue:case_hold - - helm|lexglue:ecthr_a - - helm|lexglue:ecthr_b - - helm|lexglue:eurlex - - helm|lexglue:ledgar - - helm|lexglue:scotus - - helm|lexglue:unfair_tos - - helm|lextreme:brazilian_court_decisions_judgment - - helm|lextreme:brazilian_court_decisions_unanimity - - helm|lextreme:covid19_emergency_event - - helm|lextreme:german_argument_mining - - helm|lextreme:greek_legal_code_chapter - - helm|lextreme:greek_legal_code_subject - - helm|lextreme:greek_legal_code_volume - - helm|lextreme:greek_legal_ner - - helm|lextreme:legalnero - - helm|lextreme:lener_br - - helm|lextreme:mapa_coarse - - helm|lextreme:mapa_fine - - helm|lextreme:multi_eurlex_level_1 - - helm|lextreme:multi_eurlex_level_2 - - helm|lextreme:multi_eurlex_level_3 - - helm|lextreme:online_terms_of_service_clause_topics - - helm|lextreme:online_terms_of_service_unfairness_levels - - helm|lextreme:swiss_judgment_prediction - - helm|lsat_qa - - helm|lsat_qa:assignment - - helm|lsat_qa:grouping - - helm|lsat_qa:miscellaneous - - helm|lsat_qa:ordering - - helm|me_q_sum - - helm|med_dialog:healthcaremagic - - helm|med_dialog:icliniq - - helm|med_mcqa - - helm|med_paragraph_simplification - - helm|med_qa - - helm|mmlu - - helm|mmlu:abstract_algebra - - helm|mmlu:anatomy - - helm|mmlu:astronomy - - helm|mmlu:business_ethics - - helm|mmlu:clinical_knowledge - - helm|mmlu:college_biology - - helm|mmlu:college_chemistry - - helm|mmlu:college_computer_science - - helm|mmlu:college_mathematics - - helm|mmlu:college_medicine - - helm|mmlu:college_physics - - helm|mmlu:computer_security - - helm|mmlu:conceptual_physics - - helm|mmlu:econometrics - - helm|mmlu:electrical_engineering - - helm|mmlu:elementary_mathematics - - helm|mmlu:formal_logic - - helm|mmlu:global_facts - - helm|mmlu:high_school_biology - - helm|mmlu:high_school_chemistry - - helm|mmlu:high_school_computer_science - - helm|mmlu:high_school_european_history - - helm|mmlu:high_school_geography - - helm|mmlu:high_school_government_and_politics - - helm|mmlu:high_school_macroeconomics - - helm|mmlu:high_school_mathematics - - helm|mmlu:high_school_microeconomics - - helm|mmlu:high_school_physics - - helm|mmlu:high_school_psychology - - helm|mmlu:high_school_statistics - - helm|mmlu:high_school_us_history - - helm|mmlu:high_school_world_history - - helm|mmlu:human_aging - - helm|mmlu:human_sexuality - - helm|mmlu:international_law - - helm|mmlu:jurisprudence - - helm|mmlu:logical_fallacies - - helm|mmlu:machine_learning - - helm|mmlu:management - - helm|mmlu:marketing - - helm|mmlu:medical_genetics - - helm|mmlu:miscellaneous - - helm|mmlu:moral_disputes - - helm|mmlu:moral_scenarios - - helm|mmlu:nutrition - - helm|mmlu:philosophy - - helm|mmlu:prehistory - - helm|mmlu:professional_accounting - - helm|mmlu:professional_law - - 
helm|mmlu:professional_medicine - - helm|mmlu:professional_psychology - - helm|mmlu:public_relations - - helm|mmlu:security_studies - - helm|mmlu:sociology - - helm|mmlu:us_foreign_policy - - helm|mmlu:virology - - helm|mmlu:world_religions - - helm|narrativeqa - - helm|numeracy:linear_example - - helm|numeracy:linear_standard - - helm|numeracy:parabola_example - - helm|numeracy:parabola_standard - - helm|numeracy:paraboloid_example - - helm|numeracy:paraboloid_standard - - helm|numeracy:plane_example - - helm|numeracy:plane_standard - - helm|openbookqa - - helm|piqa - - helm|pubmedqa - - helm|quac - - helm|raft:ade_corpus_v2 - - helm|raft:banking_77 - - helm|raft:neurips_impact_statement_risks - - helm|raft:one_stop_english - - helm|raft:overruling - - helm|raft:semiconductor_org_types - - helm|raft:systematic_review_inclusion - - helm|raft:tai_safety_research - - helm|raft:terms_of_service - - helm|raft:tweet_eval_hate - - helm|raft:twitter_complaints - - helm|real_toxicity_prompts - - helm|siqa - - helm|summarization:cnn-dm - - helm|summarization:xsum - - helm|summarization:xsum-sampled - - helm|synthetic_reasoning:induction - - helm|synthetic_reasoning:natural_easy - - helm|synthetic_reasoning:natural_hard - - helm|synthetic_reasoning:pattern_match - - helm|synthetic_reasoning:variable_substitution - - helm|the_pile:arxiv - - helm|the_pile:bibliotik - - helm|the_pile:commoncrawl - - helm|the_pile:dm-mathematics - - helm|the_pile:enron - - helm|the_pile:europarl - - helm|the_pile:freelaw - - helm|the_pile:github - - helm|the_pile:gutenberg - - helm|the_pile:hackernews - - helm|the_pile:nih-exporter - - helm|the_pile:opensubtitles - - helm|the_pile:openwebtext2 - - helm|the_pile:pubmed-abstracts - - helm|the_pile:pubmed-central - - helm|the_pile:stackexchange - - helm|the_pile:upsto - - helm|the_pile:wikipedia - - helm|the_pile:youtubesubtitles - - helm|truthfulqa - - helm|twitterAAE:aa - - helm|twitterAAE:white - - helm|wikifact:applies_to_jurisdiction - - helm|wikifact:atomic_number - - helm|wikifact:author - - helm|wikifact:award_received - - helm|wikifact:basic_form_of_government - - helm|wikifact:capital - - helm|wikifact:capital_of - - helm|wikifact:central_bank - - helm|wikifact:composer - - helm|wikifact:continent - - helm|wikifact:country - - helm|wikifact:country_of_citizenship - - helm|wikifact:country_of_origin - - helm|wikifact:creator - - helm|wikifact:currency - - helm|wikifact:defendant - - helm|wikifact:developer - - helm|wikifact:diplomatic_relation - - helm|wikifact:director - - helm|wikifact:discoverer_or_inventor - - helm|wikifact:drug_or_therapy_used_for_treatment - - helm|wikifact:educated_at - - helm|wikifact:electron_configuration - - helm|wikifact:employer - - helm|wikifact:field_of_work - - helm|wikifact:file_extension - - helm|wikifact:genetic_association - - helm|wikifact:genre - - helm|wikifact:has_part - - helm|wikifact:head_of_government - - helm|wikifact:head_of_state - - helm|wikifact:headquarters_location - - helm|wikifact:industry - - helm|wikifact:influenced_by - - helm|wikifact:instance_of - - helm|wikifact:instrument - - helm|wikifact:language_of_work_or_name - - helm|wikifact:languages_spoken_written_or_signed - - helm|wikifact:laws_applied - - helm|wikifact:located_in_the_administrative_territorial_entity - - helm|wikifact:location - - helm|wikifact:location_of_discovery - - helm|wikifact:location_of_formation - - helm|wikifact:majority_opinion_by - - helm|wikifact:manufacturer - - helm|wikifact:measured_physical_quantity - - 
helm|wikifact:medical_condition_treated - - helm|wikifact:member_of - - helm|wikifact:member_of_political_party - - helm|wikifact:member_of_sports_team - - helm|wikifact:movement - - helm|wikifact:named_after - - helm|wikifact:native_language - - helm|wikifact:number_of_processor_cores - - helm|wikifact:occupation - - helm|wikifact:office_held_by_head_of_government - - helm|wikifact:office_held_by_head_of_state - - helm|wikifact:official_language - - helm|wikifact:operating_system - - helm|wikifact:original_language_of_film_or_TV_show - - helm|wikifact:original_network - - helm|wikifact:overrules - - helm|wikifact:owned_by - - helm|wikifact:part_of - - helm|wikifact:participating_team - - helm|wikifact:place_of_birth - - helm|wikifact:place_of_death - - helm|wikifact:plaintiff - - helm|wikifact:position_held - - helm|wikifact:position_played_on_team - - helm|wikifact:programming_language - - helm|wikifact:recommended_unit_of_measurement - - helm|wikifact:record_label - - helm|wikifact:religion - - helm|wikifact:repealed_by - - helm|wikifact:shares_border_with - - helm|wikifact:solved_by - - helm|wikifact:statement_describes - - helm|wikifact:stock_exchange - - helm|wikifact:subclass_of - - helm|wikifact:subsidiary - - helm|wikifact:symptoms_and_signs - - helm|wikifact:therapeutic_area - - helm|wikifact:time_of_discovery_or_invention - - helm|wikifact:twinned_administrative_body - - helm|wikifact:work_location - - helm|wikitext:103:document_level - - helm|wmt14:cs-en - - helm|wmt14:de-en - - helm|wmt14:fr-en - - helm|wmt14:hi-en - - helm|wmt14:ru-en - -- leaderboard: - - leaderboard|arc:challenge - - leaderboard|gsm8k - - leaderboard|hellaswag - - leaderboard|mmlu:abstract_algebra - - leaderboard|mmlu:anatomy - - leaderboard|mmlu:astronomy - - leaderboard|mmlu:business_ethics - - leaderboard|mmlu:clinical_knowledge - - leaderboard|mmlu:college_biology - - leaderboard|mmlu:college_chemistry - - leaderboard|mmlu:college_computer_science - - leaderboard|mmlu:college_mathematics - - leaderboard|mmlu:college_medicine - - leaderboard|mmlu:college_physics - - leaderboard|mmlu:computer_security - - leaderboard|mmlu:conceptual_physics - - leaderboard|mmlu:econometrics - - leaderboard|mmlu:electrical_engineering - - leaderboard|mmlu:elementary_mathematics - - leaderboard|mmlu:formal_logic - - leaderboard|mmlu:global_facts - - leaderboard|mmlu:high_school_biology - - leaderboard|mmlu:high_school_chemistry - - leaderboard|mmlu:high_school_computer_science - - leaderboard|mmlu:high_school_european_history - - leaderboard|mmlu:high_school_geography - - leaderboard|mmlu:high_school_government_and_politics - - leaderboard|mmlu:high_school_macroeconomics - - leaderboard|mmlu:high_school_mathematics - - leaderboard|mmlu:high_school_microeconomics - - leaderboard|mmlu:high_school_physics - - leaderboard|mmlu:high_school_psychology - - leaderboard|mmlu:high_school_statistics - - leaderboard|mmlu:high_school_us_history - - leaderboard|mmlu:high_school_world_history - - leaderboard|mmlu:human_aging - - leaderboard|mmlu:human_sexuality - - leaderboard|mmlu:international_law - - leaderboard|mmlu:jurisprudence - - leaderboard|mmlu:logical_fallacies - - leaderboard|mmlu:machine_learning - - leaderboard|mmlu:management - - leaderboard|mmlu:marketing - - leaderboard|mmlu:medical_genetics - - leaderboard|mmlu:miscellaneous - - leaderboard|mmlu:moral_disputes - - leaderboard|mmlu:moral_scenarios - - leaderboard|mmlu:nutrition - - leaderboard|mmlu:philosophy - - leaderboard|mmlu:prehistory - - 
leaderboard|mmlu:professional_accounting - - leaderboard|mmlu:professional_law - - leaderboard|mmlu:professional_medicine - - leaderboard|mmlu:professional_psychology - - leaderboard|mmlu:public_relations - - leaderboard|mmlu:security_studies - - leaderboard|mmlu:sociology - - leaderboard|mmlu:us_foreign_policy - - leaderboard|mmlu:virology - - leaderboard|mmlu:world_religions - - leaderboard|truthfulqa:mc - - leaderboard|winogrande - -- lighteval: - - lighteval|agieval:aqua-rat - - lighteval|agieval:gaokao-biology - - lighteval|agieval:gaokao-chemistry - - lighteval|agieval:gaokao-chinese - - lighteval|agieval:gaokao-english - - lighteval|agieval:gaokao-geography - - lighteval|agieval:gaokao-history - - lighteval|agieval:gaokao-mathqa - - lighteval|agieval:gaokao-physics - - lighteval|agieval:logiqa-en - - lighteval|agieval:logiqa-zh - - lighteval|agieval:lsat-ar - - lighteval|agieval:lsat-lr - - lighteval|agieval:lsat-rc - - lighteval|agieval:sat-en - - lighteval|agieval:sat-en-without-passage - - lighteval|agieval:sat-math - - lighteval|anli - - lighteval|anli:r1 - - lighteval|anli:r2 - - lighteval|anli:r3 - - lighteval|arc:easy - - lighteval|arithmetic:1dc - - lighteval|arithmetic:2da - - lighteval|arithmetic:2dm - - lighteval|arithmetic:2ds - - lighteval|arithmetic:3da - - lighteval|arithmetic:3ds - - lighteval|arithmetic:4da - - lighteval|arithmetic:4ds - - lighteval|arithmetic:5da - - lighteval|arithmetic:5ds - - lighteval|asdiv - - lighteval|bigbench:causal_judgment - - lighteval|bigbench:date_understanding - - lighteval|bigbench:disambiguation_qa - - lighteval|bigbench:geometric_shapes - - lighteval|bigbench:logical_deduction_five_objects - - lighteval|bigbench:logical_deduction_seven_objects - - lighteval|bigbench:logical_deduction_three_objects - - lighteval|bigbench:movie_recommendation - - lighteval|bigbench:navigate - - lighteval|bigbench:reasoning_about_colored_objects - - lighteval|bigbench:ruin_names - - lighteval|bigbench:salient_translation_error_detection - - lighteval|bigbench:snarks - - lighteval|bigbench:sports_understanding - - lighteval|bigbench:temporal_sequences - - lighteval|bigbench:tracking_shuffled_objects_five_objects - - lighteval|bigbench:tracking_shuffled_objects_seven_objects - - lighteval|bigbench:tracking_shuffled_objects_three_objects - - lighteval|blimp:adjunct_island - - lighteval|blimp:anaphor_gender_agreement - - lighteval|blimp:anaphor_number_agreement - - lighteval|blimp:animate_subject_passive - - lighteval|blimp:animate_subject_trans - - lighteval|blimp:causative - - lighteval|blimp:complex_NP_island - - lighteval|blimp:coordinate_structure_constraint_complex_left_branch - - lighteval|blimp:coordinate_structure_constraint_object_extraction - - lighteval|blimp:determiner_noun_agreement_1 - - lighteval|blimp:determiner_noun_agreement_2 - - lighteval|blimp:determiner_noun_agreement_irregular_1 - - lighteval|blimp:determiner_noun_agreement_irregular_2 - - lighteval|blimp:determiner_noun_agreement_with_adj_2 - - lighteval|blimp:determiner_noun_agreement_with_adj_irregular_1 - - lighteval|blimp:determiner_noun_agreement_with_adj_irregular_2 - - lighteval|blimp:determiner_noun_agreement_with_adjective_1 - - lighteval|blimp:distractor_agreement_relational_noun - - lighteval|blimp:distractor_agreement_relative_clause - - lighteval|blimp:drop_argument - - lighteval|blimp:ellipsis_n_bar_1 - - lighteval|blimp:ellipsis_n_bar_2 - - lighteval|blimp:existential_there_object_raising - - lighteval|blimp:existential_there_quantifiers_1 - - 
lighteval|blimp:existential_there_quantifiers_2 - - lighteval|blimp:existential_there_subject_raising - - lighteval|blimp:expletive_it_object_raising - - lighteval|blimp:inchoative - - lighteval|blimp:intransitive - - lighteval|blimp:irregular_past_participle_adjectives - - lighteval|blimp:irregular_past_participle_verbs - - lighteval|blimp:irregular_plural_subject_verb_agreement_1 - - lighteval|blimp:irregular_plural_subject_verb_agreement_2 - - lighteval|blimp:left_branch_island_echo_question - - lighteval|blimp:left_branch_island_simple_question - - lighteval|blimp:matrix_question_npi_licensor_present - - lighteval|blimp:npi_present_1 - - lighteval|blimp:npi_present_2 - - lighteval|blimp:only_npi_licensor_present - - lighteval|blimp:only_npi_scope - - lighteval|blimp:passive_1 - - lighteval|blimp:passive_2 - - lighteval|blimp:principle_A_c_command - - lighteval|blimp:principle_A_case_1 - - lighteval|blimp:principle_A_case_2 - - lighteval|blimp:principle_A_domain_1 - - lighteval|blimp:principle_A_domain_2 - - lighteval|blimp:principle_A_domain_3 - - lighteval|blimp:principle_A_reconstruction - - lighteval|blimp:regular_plural_subject_verb_agreement_1 - - lighteval|blimp:regular_plural_subject_verb_agreement_2 - - lighteval|blimp:sentential_negation_npi_licensor_present - - lighteval|blimp:sentential_negation_npi_scope - - lighteval|blimp:sentential_subject_island - - lighteval|blimp:superlative_quantifiers_1 - - lighteval|blimp:superlative_quantifiers_2 - - lighteval|blimp:tough_vs_raising_1 - - lighteval|blimp:tough_vs_raising_2 - - lighteval|blimp:transitive - - lighteval|blimp:wh_island - - lighteval|blimp:wh_questions_object_gap - - lighteval|blimp:wh_questions_subject_gap - - lighteval|blimp:wh_questions_subject_gap_long_distance - - lighteval|blimp:wh_vs_that_no_gap - - lighteval|blimp:wh_vs_that_no_gap_long_distance - - lighteval|blimp:wh_vs_that_with_gap - - lighteval|blimp:wh_vs_that_with_gap_long_distance - - lighteval|coqa - - lighteval|coqa_bb - - lighteval|drop - - lighteval|ethics:commonsense - - lighteval|ethics:deontology - - lighteval|ethics:justice - - lighteval|ethics:utilitarianism - - lighteval|ethics:virtue - - lighteval|glue:cola - - lighteval|glue:mnli - - lighteval|glue:mnli_mismatched - - lighteval|glue:mrpc - - lighteval|glue:qnli - - lighteval|glue:qqp - - lighteval|glue:rte - - lighteval|glue:sst2 - - lighteval|glue:stsb - - lighteval|glue:wnli - - lighteval|gpqa - - lighteval|gsm8k - - lighteval|headqa:en - - lighteval|headqa:es - - lighteval|iwslt17:ar-en - - lighteval|iwslt17:de-en - - lighteval|iwslt17:en-ar - - lighteval|iwslt17:en-de - - lighteval|iwslt17:en-fr - - lighteval|iwslt17:en-ja - - lighteval|iwslt17:en-ko - - lighteval|iwslt17:en-zh - - lighteval|iwslt17:fr-en - - lighteval|iwslt17:ja-en - - lighteval|iwslt17:ko-en - - lighteval|iwslt17:zh-en - - lighteval|lambada:openai - - lighteval|lambada:openai:de - - lighteval|lambada:openai:en - - lighteval|lambada:openai:es - - lighteval|lambada:openai:fr - - lighteval|lambada:openai:it - - lighteval|lambada:openai_cloze - - lighteval|lambada:standard - - lighteval|lambada:standard_cloze - - lighteval|logiqa - - lighteval|math:algebra - - lighteval|math:counting_and_probability - - lighteval|math:geometry - - lighteval|math:intermediate_algebra - - lighteval|math:number_theory - - lighteval|math:prealgebra - - lighteval|math:precalculus - - lighteval|math_cot:algebra - - lighteval|math_cot:counting_and_probability - - lighteval|math_cot:geometry - - lighteval|math_cot:intermediate_algebra - - 
lighteval|math_cot:number_theory - - lighteval|math_cot:prealgebra - - lighteval|math_cot:precalculus - - lighteval|mathqa - - lighteval|mgsm:bn - - lighteval|mgsm:de - - lighteval|mgsm:en - - lighteval|mgsm:es - - lighteval|mgsm:fr - - lighteval|mgsm:ja - - lighteval|mgsm:ru - - lighteval|mgsm:sw - - lighteval|mgsm:te - - lighteval|mgsm:th - - lighteval|mgsm:zh - - lighteval|mtnt2019:en-fr - - lighteval|mtnt2019:en-ja - - lighteval|mtnt2019:fr-en - - lighteval|mtnt2019:ja-en - - lighteval|mutual - - lighteval|mutual_plus - - lighteval|openbookqa - - lighteval|piqa - - lighteval|prost - - lighteval|pubmedqa - - lighteval|qa4mre:2011 - - lighteval|qa4mre:2012 - - lighteval|qa4mre:2013 - - lighteval|qasper - - lighteval|qasper_ll - - lighteval|race:high - - lighteval|sciq - - lighteval|storycloze:2016 - - lighteval|storycloze:2018 - - lighteval|super_glue:boolq - - lighteval|super_glue:cb - - lighteval|super_glue:copa - - lighteval|super_glue:multirc - - lighteval|super_glue:rte - - lighteval|super_glue:wic - - lighteval|super_glue:wsc - - lighteval|swag - - lighteval|the_pile:arxiv - - lighteval|the_pile:bookcorpus2 - - lighteval|the_pile:books3 - - lighteval|the_pile:dm-mathematics - - lighteval|the_pile:enron - - lighteval|the_pile:europarl - - lighteval|the_pile:freelaw - - lighteval|the_pile:github - - lighteval|the_pile:gutenberg - - lighteval|the_pile:hackernews - - lighteval|the_pile:nih-exporter - - lighteval|the_pile:opensubtitles - - lighteval|the_pile:openwebtext2 - - lighteval|the_pile:philpapers - - lighteval|the_pile:pile-cc - - lighteval|the_pile:pubmed-abstracts - - lighteval|the_pile:pubmed-central - - lighteval|the_pile:stackexchange - - lighteval|the_pile:ubuntu-irc - - lighteval|the_pile:uspto - - lighteval|the_pile:wikipedia - - lighteval|the_pile:youtubesubtitles - - lighteval|toxigen - - lighteval|triviaqa - - lighteval|truthfulqa:gen - - lighteval|unscramble:anagrams1 - - lighteval|unscramble:anagrams2 - - lighteval|unscramble:cycle_letters - - lighteval|unscramble:random_insertion - - lighteval|unscramble:reversed_words - - lighteval|webqs - - lighteval|wikitext:2 - - lighteval|wmt08:cs-en - - lighteval|wmt08:de-en - - lighteval|wmt08:en-cs - - lighteval|wmt08:en-de - - lighteval|wmt08:en-es - - lighteval|wmt08:en-fr - - lighteval|wmt08:en-hu - - lighteval|wmt08:es-en - - lighteval|wmt08:fr-en - - lighteval|wmt08:hu-en - - lighteval|wmt09:cs-en - - lighteval|wmt09:de-en - - lighteval|wmt09:en-cs - - lighteval|wmt09:en-de - - lighteval|wmt09:en-es - - lighteval|wmt09:en-fr - - lighteval|wmt09:en-hu - - lighteval|wmt09:en-it - - lighteval|wmt09:es-en - - lighteval|wmt09:fr-en - - lighteval|wmt09:hu-en - - lighteval|wmt09:it-en - - lighteval|wmt10:cs-en - - lighteval|wmt10:de-en - - lighteval|wmt10:en-cs - - lighteval|wmt10:en-de - - lighteval|wmt10:en-es - - lighteval|wmt10:en-fr - - lighteval|wmt10:es-en - - lighteval|wmt10:fr-en - - lighteval|wmt11:cs-en - - lighteval|wmt11:de-en - - lighteval|wmt11:en-cs - - lighteval|wmt11:en-de - - lighteval|wmt11:en-es - - lighteval|wmt11:en-fr - - lighteval|wmt11:es-en - - lighteval|wmt11:fr-en - - lighteval|wmt12:cs-en - - lighteval|wmt12:de-en - - lighteval|wmt12:en-cs - - lighteval|wmt12:en-de - - lighteval|wmt12:en-es - - lighteval|wmt12:en-fr - - lighteval|wmt12:es-en - - lighteval|wmt12:fr-en - - lighteval|wmt13:cs-en - - lighteval|wmt13:de-en - - lighteval|wmt13:en-cs - - lighteval|wmt13:en-de - - lighteval|wmt13:en-es - - lighteval|wmt13:en-fr - - lighteval|wmt13:en-ru - - lighteval|wmt13:es-en - - lighteval|wmt13:fr-en 
- - lighteval|wmt13:ru-en - - lighteval|wmt14:cs-en - - lighteval|wmt14:de-en - - lighteval|wmt14:en-cs - - lighteval|wmt14:en-de - - lighteval|wmt14:en-fr - - lighteval|wmt14:en-hi - - lighteval|wmt14:en-ru - - lighteval|wmt14:fr-en - - lighteval|wmt14:hi-en - - lighteval|wmt14:ru-en - - lighteval|wmt15:cs-en - - lighteval|wmt15:de-en - - lighteval|wmt15:en-cs - - lighteval|wmt15:en-de - - lighteval|wmt15:en-fi - - lighteval|wmt15:en-fr - - lighteval|wmt15:en-ru - - lighteval|wmt15:fi-en - - lighteval|wmt15:fr-en - - lighteval|wmt15:ru-en - - lighteval|wmt16:cs-en - - lighteval|wmt16:de-en - - lighteval|wmt16:en-cs - - lighteval|wmt16:en-de - - lighteval|wmt16:en-fi - - lighteval|wmt16:en-ro - - lighteval|wmt16:en-ru - - lighteval|wmt16:en-tr - - lighteval|wmt16:fi-en - - lighteval|wmt16:ro-en - - lighteval|wmt16:ru-en - - lighteval|wmt16:tr-en - - lighteval|wmt17:cs-en - - lighteval|wmt17:de-en - - lighteval|wmt17:en-cs - - lighteval|wmt17:en-de - - lighteval|wmt17:en-fi - - lighteval|wmt17:en-lv - - lighteval|wmt17:en-ru - - lighteval|wmt17:en-tr - - lighteval|wmt17:en-zh - - lighteval|wmt17:fi-en - - lighteval|wmt17:lv-en - - lighteval|wmt17:ru-en - - lighteval|wmt17:tr-en - - lighteval|wmt17:zh-en - - lighteval|wmt18:cs-en - - lighteval|wmt18:de-en - - lighteval|wmt18:en-cs - - lighteval|wmt18:en-de - - lighteval|wmt18:en-et - - lighteval|wmt18:en-fi - - lighteval|wmt18:en-ru - - lighteval|wmt18:en-tr - - lighteval|wmt18:en-zh - - lighteval|wmt18:et-en - - lighteval|wmt18:fi-en - - lighteval|wmt18:ru-en - - lighteval|wmt18:tr-en - - lighteval|wmt18:zh-en - - lighteval|wmt19:cs-de - - lighteval|wmt19:de-cs - - lighteval|wmt19:de-en - - lighteval|wmt19:de-fr - - lighteval|wmt19:en-cs - - lighteval|wmt19:en-de - - lighteval|wmt19:en-fi - - lighteval|wmt19:en-gu - - lighteval|wmt19:en-kk - - lighteval|wmt19:en-lt - - lighteval|wmt19:en-ru - - lighteval|wmt19:en-zh - - lighteval|wmt19:fi-en - - lighteval|wmt19:fr-de - - lighteval|wmt19:gu-en - - lighteval|wmt19:kk-en - - lighteval|wmt19:lt-en - - lighteval|wmt19:ru-en - - lighteval|wmt19:zh-en - - lighteval|wmt20:cs-en - - lighteval|wmt20:de-en - - lighteval|wmt20:de-fr - - lighteval|wmt20:en-cs - - lighteval|wmt20:en-de - - lighteval|wmt20:en-iu - - lighteval|wmt20:en-ja - - lighteval|wmt20:en-km - - lighteval|wmt20:en-pl - - lighteval|wmt20:en-ps - - lighteval|wmt20:en-ru - - lighteval|wmt20:en-ta - - lighteval|wmt20:en-zh - - lighteval|wmt20:fr-de - - lighteval|wmt20:iu-en - - lighteval|wmt20:ja-en - - lighteval|wmt20:km-en - - lighteval|wmt20:pl-en - - lighteval|wmt20:ps-en - - lighteval|wmt20:ru-en - - lighteval|wmt20:ta-en - - lighteval|wmt20:zh-en - - lighteval|wsc273 - - lighteval|xcopa:en - - lighteval|xcopa:et - - lighteval|xcopa:ht - - lighteval|xcopa:id - - lighteval|xcopa:it - - lighteval|xcopa:qu - - lighteval|xcopa:sw - - lighteval|xcopa:ta - - lighteval|xcopa:th - - lighteval|xcopa:tr - - lighteval|xcopa:vi - - lighteval|xcopa:zh - - lighteval|xstory_cloze:ar - - lighteval|xstory_cloze:en - - lighteval|xstory_cloze:es - - lighteval|xstory_cloze:eu - - lighteval|xstory_cloze:hi - - lighteval|xstory_cloze:id - - lighteval|xstory_cloze:my - - lighteval|xstory_cloze:ru - - lighteval|xstory_cloze:sw - - lighteval|xstory_cloze:te - - lighteval|xstory_cloze:zh - - lighteval|xwinograd:en - - lighteval|xwinograd:fr - - lighteval|xwinograd:jp - - lighteval|xwinograd:pt - - lighteval|xwinograd:ru - - lighteval|xwinograd:zh - -- original: - - original|arc:c:letters - - original|arc:c:options - - original|arc:c:simple - - original|mmlu 
- - original|mmlu:abstract_algebra - - original|mmlu:anatomy - - original|mmlu:astronomy - - original|mmlu:business_ethics - - original|mmlu:clinical_knowledge - - original|mmlu:college_biology - - original|mmlu:college_chemistry - - original|mmlu:college_computer_science - - original|mmlu:college_mathematics - - original|mmlu:college_medicine - - original|mmlu:college_physics - - original|mmlu:computer_security - - original|mmlu:conceptual_physics - - original|mmlu:econometrics - - original|mmlu:electrical_engineering - - original|mmlu:elementary_mathematics - - original|mmlu:formal_logic - - original|mmlu:global_facts - - original|mmlu:high_school_biology - - original|mmlu:high_school_chemistry - - original|mmlu:high_school_computer_science - - original|mmlu:high_school_european_history - - original|mmlu:high_school_geography - - original|mmlu:high_school_government_and_politics - - original|mmlu:high_school_macroeconomics - - original|mmlu:high_school_mathematics - - original|mmlu:high_school_microeconomics - - original|mmlu:high_school_physics - - original|mmlu:high_school_psychology - - original|mmlu:high_school_statistics - - original|mmlu:high_school_us_history - - original|mmlu:high_school_world_history - - original|mmlu:human_aging - - original|mmlu:human_sexuality - - original|mmlu:international_law - - original|mmlu:jurisprudence - - original|mmlu:logical_fallacies - - original|mmlu:machine_learning - - original|mmlu:management - - original|mmlu:marketing - - original|mmlu:medical_genetics - - original|mmlu:miscellaneous - - original|mmlu:moral_disputes - - original|mmlu:moral_scenarios - - original|mmlu:nutrition - - original|mmlu:philosophy - - original|mmlu:prehistory - - original|mmlu:professional_accounting - - original|mmlu:professional_law - - original|mmlu:professional_medicine - - original|mmlu:professional_psychology - - original|mmlu:public_relations - - original|mmlu:security_studies - - original|mmlu:sociology - - original|mmlu:us_foreign_policy - - original|mmlu:virology - - original|mmlu:world_religions diff --git a/docs/source/package_reference/models.mdx b/docs/source/package_reference/models.mdx index 6a0660a4a..f5ee5a785 100644 --- a/docs/source/package_reference/models.mdx +++ b/docs/source/package_reference/models.mdx @@ -1,40 +1,43 @@ -# Models +# Model Configs -## Model -### LightevalModel -[[autodoc]] models.abstract_model.LightevalModel +The model configs are used to define the model and its parameters. All the parameters can be +set in the `model-args` or in the model yaml file (see example +[here](https://github.com/huggingface/lighteval/blob/main/examples/model_configs/vllm_model_config.yaml)). 
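+For instance, a minimal YAML file could look like the sketch below (the model name is only a
+placeholder; the exact fields accepted are those of the config classes documented on this page):
+
+```yaml
+model_parameters:
+  model_name: "HuggingFaceTB/SmolLM2-1.7B-Instruct"  # placeholder model id, swap in the model you want to evaluate
+  batch_size: 1
+  use_chat_template: true
+```
+
+The same parameters can also be passed inline as a comma-separated `key=value` string, e.g.
+`model_name=HuggingFaceTB/SmolLM2-1.7B-Instruct,batch_size=1,use_chat_template=true`.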
+### Base model config +[[autodoc]] models.utils.ModelConfig -## Accelerate and Transformers Models -### TransformersModel -[[autodoc]] models.transformers.transformers_model.TransformersModelConfig -[[autodoc]] models.transformers.transformers_model.TransformersModel +## Local Models -### AdapterModel +### Transformers Model +[[autodoc]] models.transformers.transformers_model.TransformersModelConfig [[autodoc]] models.transformers.adapter_model.AdapterModelConfig -[[autodoc]] models.transformers.adapter_model.AdapterModel - -### DeltaModel [[autodoc]] models.transformers.delta_model.DeltaModelConfig -[[autodoc]] models.transformers.delta_model.DeltaModel + +### VLLM Model +[[autodoc]] models.vllm.vllm_model.VLLMModelConfig + +### SGLang Model +[[autodoc]] models.sglang.sglang_model.SGLangModelConfig + +### Dummy Model +[[autodoc]] models.dummy.dummy_model.DummyModelConfig + ## Endpoints-based Models + +### Inference Providers Model +[[autodoc]] models.endpoints.inference_providers_model.InferenceProvidersModelConfig + ### InferenceEndpointModel [[autodoc]] models.endpoints.endpoint_model.InferenceEndpointModelConfig [[autodoc]] models.endpoints.endpoint_model.ServerlessEndpointModelConfig -[[autodoc]] models.endpoints.endpoint_model.InferenceEndpointModel ### TGI ModelClient [[autodoc]] models.endpoints.tgi_model.TGIModelConfig -[[autodoc]] models.endpoints.tgi_model.ModelClient -### Custom Model -[[autodoc]] models.custom.custom_model.CustomModelConfig +### Litellm Model +[[autodoc]] models.litellm_model.LiteLLMModelConfig -### Open AI Models -[[autodoc]] models.endpoints.openai_model.OpenAIClient - -## VLLM Model -### VLLMModel -[[autodoc]] models.vllm.vllm_model.VLLMModelConfig -[[autodoc]] models.vllm.vllm_model.VLLMModel +## Custom Model +[[autodoc]] models.custom.custom_model.CustomModelConfig diff --git a/docs/source/package_reference/models_outputs.mdx b/docs/source/package_reference/models_outputs.mdx new file mode 100644 index 000000000..8bc0737a1 --- /dev/null +++ b/docs/source/package_reference/models_outputs.mdx @@ -0,0 +1,5 @@ +# Model's Output + +All models will generate an output per Doc supplied to the `generation` or `loglikelihood` functions. + +[[autodoc]] lighteval.models.model_output.ModelResponse diff --git a/docs/source/package_reference/tasks.mdx b/docs/source/package_reference/tasks.mdx index c1a84b00a..96b082b3c 100644 --- a/docs/source/package_reference/tasks.mdx +++ b/docs/source/package_reference/tasks.mdx @@ -7,32 +7,17 @@ [[autodoc]] tasks.lighteval_task.LightevalTask ## PromptManager - [[autodoc]] tasks.prompt_manager.PromptManager ## Registry - [[autodoc]] tasks.registry.Registry -## Requests - -[[autodoc]] tasks.requests.Request - -[[autodoc]] tasks.requests.LoglikelihoodRequest - -[[autodoc]] tasks.requests.LoglikelihoodSingleTokenRequest - -[[autodoc]] tasks.requests.LoglikelihoodRollingRequest - -[[autodoc]] tasks.requests.GreedyUntilRequest - -[[autodoc]] tasks.requests.GreedyUntilMultiTurnRequest +## Doc +[[autodoc]] tasks.requests.Doc ## Datasets - [[autodoc]] data.DynamicBatchDataset [[autodoc]] data.LoglikelihoodDataset -[[autodoc]] data.LoglikelihoodSingleTokenDataset [[autodoc]] data.GenerativeTaskDataset [[autodoc]] data.GenerativeTaskDatasetNanotron [[autodoc]] data.GenDistributedSampler diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx index 06c06021b..69eb52a06 100644 --- a/docs/source/quicktour.mdx +++ b/docs/source/quicktour.mdx @@ -15,10 +15,9 @@ Lighteval can be used with a few different commands.
- `lighteval vllm`: evaluate models on one or more GPUs using [🚀 VLLM](https://github.com/vllm-project/vllm) - `lighteval endpoint` - - `inference-endpoint`: evaluate models on one or more GPUs using [🔗 - Inference Endpoint](https://huggingface.co/inference-endpoints/dedicated) - - `tgi`: evaluate models on one or more GPUs using [🔗 Text Generation Inference](https://huggingface.co/docs/text-generation-inference/en/index) - - `openai`: evaluate models on one or more GPUs using [🔗 OpenAI API](https://platform.openai.com/) + - `inference-endpoint`: evaluate models using Hugging Face's [Inference Endpoints API](https://huggingface.co/inference-endpoints/dedicated). + - `tgi`: evaluate models using [🔗 Text Generation Inference](https://huggingface.co/docs/text-generation-inference/en/index) running locally. + - `litellm`: evaluate models on any compatible API using [litellm](https://www.litellm.ai/). ## Basic usage @@ -106,62 +105,10 @@ GPUs. ## Backend configuration The `model-args` argument takes a string representing a list of model -argument. The arguments allowed vary depending on the backend you use (vllm or -accelerate). - -### Accelerate - -- **pretrained** (str): - HuggingFace Hub model ID name or the path to a pre-trained - model to load. This is effectively the `pretrained_model_name_or_path` - argument of `from_pretrained` in the HuggingFace `transformers` API. -- **tokenizer** (Optional[str]): HuggingFace Hub tokenizer ID that will be - used for tokenization. -- **multichoice_continuations_start_space** (Optional[bool]): Whether to add a - space at the start of each continuation in multichoice generation. - For example, context: "What is the capital of France?" and choices: "Paris", "London". - Will be tokenized as: "What is the capital of France? Paris" and "What is the capital of France? London". - True adds a space, False strips a space, None does nothing -- **subfolder** (Optional[str]): The subfolder within the model repository. -- **revision** (str): The revision of the model. -- **max_gen_toks** (Optional[int]): The maximum number of tokens to generate. -- **max_length** (Optional[int]): The maximum length of the generated output. -- **add_special_tokens** (bool, optional, defaults to True): Whether to add special tokens to the input sequences. - If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and - `False` for causal models. -- **model_parallel** (bool, optional, defaults to None): - True/False: force to use or not the `accelerate` library to load a large - model across multiple devices. - Default: None which corresponds to comparing the number of processes with - the number of GPUs. If it's smaller => model-parallelism, else not. -- **dtype** (Union[str, torch.dtype], optional, defaults to None):): - Converts the model weights to `dtype`, if specified. Strings get - converted to `torch.dtype` objects (e.g. `float16` -> `torch.float16`). - Use `dtype="auto"` to derive the type from the model's weights. -- **device** (Union[int, str]): device to use for model training. -- **quantization_config** (Optional[BitsAndBytesConfig]): quantization - configuration for the model, manually provided to load a normally floating point - model at a quantized precision. Needed for 4-bit and 8-bit precision. -- **trust_remote_code** (bool): Whether to trust remote code during model - loading. - -### VLLM - -- **pretrained** (str): HuggingFace Hub model ID name or the path to a pre-trained model to load.
-- **gpu_memory_utilization** (float): The fraction of GPU memory to use. -- **batch_size** (int): The batch size for model training. -- **revision** (str): The revision of the model. -- **dtype** (str, None): The data type to use for the model. -- **tensor_parallel_size** (int): The number of tensor parallel units to use. -- **data_parallel_size** (int): The number of data parallel units to use. -- **max_model_length** (int): The maximum length of the model. -- **swap_space** (int): The CPU swap space size (GiB) per GPU. -- **seed** (int): The seed to use for the model. -- **trust_remote_code** (bool): Whether to trust remote code during model loading. -- **use_chat_template** (bool): Whether to use the chat template or not. -- **add_special_tokens** (bool): Whether to add special tokens to the input sequences. -- **multichoice_continuations_start_space** (bool): Whether to add a space at the start of each continuation in multichoice generation. -- **subfolder** (Optional[str]): The subfolder within the model repository. +argument. The arguments allowed vary depending on the backend you use and +correspond to the fields of the model configs. + +The model configs can be found [here](./package_reference/models). ## Nanotron diff --git a/docs/source/saving-and-reading-results.mdx b/docs/source/saving-and-reading-results.mdx index 51f893a44..2a54aeaf4 100644 --- a/docs/source/saving-and-reading-results.mdx +++ b/docs/source/saving-and-reading-results.mdx @@ -98,22 +98,9 @@ for detail in details: The detail file contains the following columns: -- `choices`: The choices presented to the model in the case of mutlichoice tasks. -- `gold`: The gold answer. -- `gold_index`: The index of the gold answer in the choices list. -- `cont_tokens`: The continuation tokens. -- `example`: The input in text form. -- `full_prompt`: The full prompt, that will be inputted to the model. -- `input_tokens`: The tokens of the full prompt. -- `instruction`: The instruction given to the model. -- `metrics`: The metrics computed for the example. -- `num_asked_few_shots`: The number of few shots asked to the model. -- `num_effective_few_shots`: The number of effective few shots. -- `padded`: Whether the input was padded. -- `pred_logits`: The logits of the model. -- `predictions`: The predictions of the model. -- `specifics`: The specifics of the task. -- `truncated`: Whether the input was truncated. +- __doc__: The doc used for the evaluation; it contains the gold reference, the few-shot examples, and other hyperparameters used for the task.
+- __model_response__: where you will find model generations, logprobs and the input that was sent to the model +- __metric__: the value of the metrics for this sample ## Example of a result file diff --git a/examples/custom_models/google_translate_model.py b/examples/custom_models/google_translate_model.py index 935dc8f5e..048a89508 100644 --- a/examples/custom_models/google_translate_model.py +++ b/examples/custom_models/google_translate_model.py @@ -58,7 +58,7 @@ def __init__(self, config) -> None: model_name=config.model_name, model_sha="", model_dtype=None, - model_size="", + model_size=-1, ) self._tokenizer = AutoTokenizer.from_pretrained("gpt2") # Use a dummy tokenizer for compatibility diff --git a/examples/custom_models/local_mt_model.py b/examples/custom_models/local_mt_model.py index 8e3f49184..487cdbe92 100644 --- a/examples/custom_models/local_mt_model.py +++ b/examples/custom_models/local_mt_model.py @@ -93,7 +93,7 @@ def __init__(self, config, env_config) -> None: model_name=config.model, model_sha="", model_dtype=None, - model_size="", + model_size=-1, ) # Update model initialization to handle both models diff --git a/examples/custom_tasks_templates/custom_yourbench_task.py b/examples/custom_tasks_templates/custom_yourbench_task.py index 2693be855..c223ea378 100644 --- a/examples/custom_tasks_templates/custom_yourbench_task.py +++ b/examples/custom_tasks_templates/custom_yourbench_task.py @@ -29,13 +29,9 @@ from lighteval.metrics.metrics import Metrics from lighteval.metrics.metrics_sample import JudgeLLM -from lighteval.metrics.utils.metric_utils import ( - CorpusLevelMetricGrouping, - MetricCategory, - MetricUseCase, -) +from lighteval.metrics.utils.metric_utils import CorpusLevelMetricGrouping from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod logger = logging.getLogger(__name__) @@ -243,8 +239,7 @@ def yourbench_prompt(line, task_name: str = ""): yourbench_metrics = CorpusLevelMetricGrouping( metric_name=["accuracy"], higher_is_better={"accuracy": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMYourBench().compute, corpus_level_fn={"accuracy": np.mean}, ) @@ -261,7 +256,7 @@ def yourbench_prompt(line, task_name: str = ""): few_shots_split=None, few_shots_select=None, generation_size=8192, - metric=[Metrics.yourbench_metrics], + metrics=[Metrics.yourbench_metrics], stop_sequence=[], trust_dataset=True, version=0, diff --git a/examples/custom_tasks_tests.py b/examples/custom_tasks_tests.py index b1c54e347..46b2f18ab 100644 --- a/examples/custom_tasks_tests.py +++ b/examples/custom_tasks_tests.py @@ -36,7 +36,7 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=512, - metric=[Metrics.expr_gold_metric], + metrics=[Metrics.expr_gold_metric], stop_sequence=None, trust_dataset=True, version=0, @@ -53,7 +53,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.gpqa_instruct_pass_at_1_1n], + metrics=[Metrics.gpqa_instruct_pass_at_1_1n], stop_sequence=[], # no stop sequence, will use eos token trust_dataset=True, version=0, diff --git a/examples/model_configs/inference_providers.yaml b/examples/model_configs/inference_providers.yaml index c08185f49..9d8dab81e 100644 --- a/examples/model_configs/inference_providers.yaml +++ b/examples/model_configs/inference_providers.yaml @@ -1,10 +1,8 @@ 
model_parameters: - model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" - provider: "novita" + model_name: "meta-llama/Llama-3.1-8B-Instruct" + provider: "nebius" timeout: null proxies: null parallel_calls_count: 20 generation_parameters: - temperature: 0.8 - top_k: 10 - max_new_tokens: 10000 + temperature: 0.4 diff --git a/examples/model_configs/sglang_model_config.yaml b/examples/model_configs/sglang_model_config.yaml index 159847e12..2dd64918e 100644 --- a/examples/model_configs/sglang_model_config.yaml +++ b/examples/model_configs/sglang_model_config.yaml @@ -6,7 +6,7 @@ model_parameters: context_length: null random_seed: 1 trust_remote_code: False - use_chat_template: False + use_chat_template: True device: "cuda" skip_tokenizer_init: False kv_cache_dtype: "auto" diff --git a/examples/model_configs/transformers_model.yaml b/examples/model_configs/transformers_model.yaml index 3d264a495..5145d8252 100644 --- a/examples/model_configs/transformers_model.yaml +++ b/examples/model_configs/transformers_model.yaml @@ -6,6 +6,7 @@ model_parameters: model_parallel: false batch_size: 1 multichoice_continuations_start_space: null # If true/false, will force multiple choice continuations to start/not start with a space. If none, will do nothing + use_chat_template: true generation_parameters: - temperature: 0.2 + temperature: 0.0 top_p: 0.9 diff --git a/examples/model_configs/transformers_vlm_model.yaml b/examples/model_configs/transformers_vlm_model.yaml index 6a32c0932..da9f43ca4 100644 --- a/examples/model_configs/transformers_vlm_model.yaml +++ b/examples/model_configs/transformers_vlm_model.yaml @@ -5,6 +5,8 @@ model_parameters: compile: false model_parallel: false batch_size: 1 + use_fast_image_processor: true + use_chat_template: true generation_parameters: - temperature: 0.2 + temperature: 0.0 top_p: 0.9 diff --git a/examples/model_configs/vllm_model_config.yaml b/examples/model_configs/vllm_model_config.yaml index fb1d27fb3..66714a298 100644 --- a/examples/model_configs/vllm_model_config.yaml +++ b/examples/model_configs/vllm_model_config.yaml @@ -17,11 +17,12 @@ model_parameters: subfolder: null max_num_seqs: 1 max_num_batched_tokens: 8192 + is_async: false generation_parameters: presence_penalty: 0.0 repetition_penalty: 1.0 frequency_penalty: 0.0 - temperature: 0.3 + temperature: 0.0 top_k: null min_p: 0.0 top_p: 0.9 diff --git a/pyproject.toml b/pyproject.toml index 5b9d863df..abd1897f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ nanotron = [ tensorboardX = ["tensorboardX"] vllm = ["vllm>=0.8.4", "ray", "more_itertools"] quality = ["ruff>=v0.11.0","pre-commit"] -tests = ["pytest==7.4.0","deepdiff"] +tests = ["pytest>=7.4.0","deepdiff"] dev = ["lighteval[accelerate,quality,tests,multilingual,math,extended_tasks,vllm]"] docs = ["hf-doc-builder", "watchdog"] extended_tasks = [ diff --git a/src/lighteval/data.py b/src/lighteval/data.py index 90d43ac23..eae2bb39a 100644 --- a/src/lighteval/data.py +++ b/src/lighteval/data.py @@ -28,6 +28,8 @@ from packaging import version from torch.utils.data import Dataset, Subset +from lighteval.tasks.requests import Doc + if version.parse(torch.__version__) >= version.parse("2.5.0"): from torch.utils.data.distributed import DistributedSampler, _T_co @@ -35,14 +37,6 @@ from torch.utils.data.distributed import DistributedSampler from torch.utils.data.distributed import T_co as _T_co -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - 
LoglikelihoodSingleTokenRequest, - Request, -) - logger = logging.getLogger(__name__) @@ -50,7 +44,7 @@ class DynamicBatchDataset(Dataset): def __init__( self, - requests: list, + requests: list[Doc], num_dataset_splits: int, ): """ @@ -129,7 +123,7 @@ def splits_iterator(self) -> Iterator[Subset]: split_start, split_end = self.splits[i] yield Subset(self, range(split_start, split_end)) - def __getitem__(self, index) -> Request: + def __getitem__(self, index) -> Doc: """ Get an item from the dataset. @@ -152,7 +146,7 @@ def __len__(self) -> int: """ return len(self.sorted_data) - def __iter__(self) -> Iterator[Request]: + def __iter__(self) -> Iterator[Doc]: """ Iterator that yields the items of the dataset depending on the split we are currently in. For instance, if we are in split 0, we will get the @@ -166,12 +160,12 @@ def __iter__(self) -> Iterator[Request]: for i in range(len(self)): yield self.sorted_data[i] - def _sorting_criteria(self, request) -> int: + def _sorting_criteria(self, doc: Doc): raise NotImplementedError() class LoglikelihoodDataset(DynamicBatchDataset): - def _sorting_criteria(self, request: LoglikelihoodRequest | LoglikelihoodRollingRequest) -> int: + def _sorting_criteria(self, doc: Doc) -> int: """ Collates the input data for batching. @@ -191,27 +185,9 @@ def _sorting_criteria(self, request: LoglikelihoodRequest | LoglikelihoodRolling Returns: tuple: A tuple containing the sorted input data. """ - toks = request.tokenized_context + request.tokenized_continuation - return -len(toks) - - -class LoglikelihoodSingleTokenDataset(DynamicBatchDataset): - def _sorting_criteria(self, request: LoglikelihoodSingleTokenRequest) -> int: - """ - Collates the input data for batching. - - the negative sign on len(toks) sorts descending - this has a few # advantages: - - time estimates will always be over not underestimates, which is - more useful for planning - - to know the size of a batch when going through the list, you - know the first one is always the batch padded context length. this - is useful to simplify the batching logic and more importantly to make - automatic adaptive batches much much easier to implement - - any OOMs will happen right away rather than near the end - """ - # We take only the prompt, no need for the continuation (since it's a list of single tokens) - toks = request.tokenized_context - return -len(toks) + len_doc_query = len(doc.query) + max_len_choices = max(len(choice) for choice in doc.choices) if doc.choices else 0 + return -(len_doc_query + max_len_choices) class GenerativeTaskDataset(DynamicBatchDataset): @@ -256,7 +232,7 @@ def init_split_limits(self, num_dataset_splits): splits_indices = [tuple(e) for e in splits_indices] return num_dataset_splits, splits_indices - def _sorting_criteria(self, request: GreedyUntilRequest) -> tuple[bool, bool, list, int, int]: + def _sorting_criteria(self, doc: Doc) -> tuple[int, bool, tuple, int, int]: """ Collate function for generating batches. @@ -266,24 +242,25 @@ def _sorting_criteria(self, request: GreedyUntilRequest) -> tuple[bool, bool, li Returns: Any: The collated data. 
""" - toks = request.context - gen_length = request.generation_size + query = doc.query + gen_length = doc.generation_size # The generative task has no limit except the model context if gen_length is None: gen_length = 0 + stop_sequences = doc.stop_sequences or [] return ( - request.do_sample, - request.use_logits, - tuple(request.stop_sequence), + doc.num_samples, + doc.use_logits, + tuple(stop_sequences), gen_length, - -(len(toks) + gen_length), + -(len(query) + gen_length), ) class GenerativeTaskDatasetNanotron(GenerativeTaskDataset): - def __getitem__(self, index) -> Request: + def __getitem__(self, index) -> tuple[int, Doc]: """ Get an item from the dataset depending on the split we are currently in. For instance, if we are in split 0, we will get the item at index 0, if diff --git a/src/lighteval/logging/evaluation_tracker.py b/src/lighteval/logging/evaluation_tracker.py index f2fe74f84..5a8f8553f 100644 --- a/src/lighteval/logging/evaluation_tracker.py +++ b/src/lighteval/logging/evaluation_tracker.py @@ -196,6 +196,21 @@ def details(self): for task_name, task_details in self.details_logger.details.items() } + def preview_outputs(self) -> None: + logger.info("Previewing outputs for your eval run, one per task") + from pprint import pprint + + for task_name, task_details in self.details_logger.details.items(): + logger.info(f"Task: {task_name}") + detail = task_details[0] + # We convert the detail to a markdown string + model_response = detail.model_response + metrics = detail.metric + + pprint(model_response.text) + pprint(model_response.input) + pprint(metrics) + def save(self) -> None: """Saves the experiment information and results to files, and to the hub if requested.""" logger.info("Saving experiment tracker") diff --git a/src/lighteval/logging/info_loggers.py b/src/lighteval/logging/info_loggers.py index ea5fc5f88..e48648d23 100644 --- a/src/lighteval/logging/info_loggers.py +++ b/src/lighteval/logging/info_loggers.py @@ -25,20 +25,17 @@ import os import time from dataclasses import asdict, dataclass, field -from typing import Optional, Union +from typing import Union import git -import numpy as np import xxhash -from lighteval.metrics import MetricCategory from lighteval.metrics.stderr import get_stderr_function from lighteval.models.abstract_model import ModelInfo from lighteval.models.model_output import ModelResponse from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig from lighteval.tasks.requests import Doc from lighteval.utils.imports import is_nanotron_available -from lighteval.utils.utils import as_list, sanitize_numpy logger = logging.getLogger(__name__) @@ -196,23 +193,9 @@ class Detail: """ - example: str = "" - instruction: str = "" - full_prompt: str = "" - num_effective_few_shots: int = 0 - num_asked_few_shots: int = 0 - predictions: list = field(default_factory=list) - prediction_logits: list = field(default_factory=list) - input_tokens: list = field(default_factory=list) - cont_tokens: list = field(default_factory=list) - truncated: list = field(default_factory=list) - padded: list = field(default_factory=list) - gold: list = field(default_factory=list) - pred_logits: list = field(default_factory=list) - choices: list = field(default_factory=list) - gold_index: list = field(default_factory=list) - metrics: dict = field(default_factory=dict) - specifics: dict = field(default_factory=dict) + doc: Doc + model_response: ModelResponse + metric: dict @dataclass class CompiledDetail: @@ -317,11 +300,9 @@ class CompiledHash: def log( self, 
task_name: str, - task: LightevalTask, doc: Doc, - outputs: list[ModelResponse], + model_response: ModelResponse, metrics: dict, - llm_as_prompt_judgement: Optional[tuple[str, str]] = None, ) -> None: """Stores the relevant information for one sample of one task to the total list of samples stored in the DetailsLogger. @@ -334,79 +315,13 @@ def log( llm_as_prompt_judgement (tuple[str, str]): Tuple containing the prompt passed to the judge and the judgement for the current sample when using llm-as-judge metric. """ - detail = self.Detail() - detail.example = doc.query - detail.instruction = doc.instruction - detail.full_prompt = doc.ctx - - predictions = [model_response.get_result_for_eval() for model_response in outputs] - - if isinstance(predictions[0], list): - # loglikelihood_single_token returns a list of list of floats (but has - # only one request), we therefore need to flatten the responses in this case. - predictions = [x for resp in predictions for x in resp] - - detail.predictions = predictions - detail.input_tokens = [o.input_tokens for o in outputs] - detail.cont_tokens = [o.generated_tokens for o in outputs] - detail.truncated = [o.truncated_tokens_count for o in outputs] - detail.padded = [o.padded_tokens_count for o in outputs] - detail.num_effective_few_shots = doc.num_effective_few_shots - detail.num_asked_few_shots = doc.num_asked_few_shots - - pred_saved = False - if ( - task.has_metric_category[MetricCategory.PERPLEXITY] - or task.has_metric_category[MetricCategory.TARGET_PERPLEXITY] - ): - pred_saved = True - pass # should we log something? - if ( - task.has_metric_category[MetricCategory.GENERATIVE] - or task.has_metric_category[MetricCategory.GENERATIVE_SAMPLING] - ): - detail.gold = doc.get_golds() - pred_saved = True - if task.has_metric_category[MetricCategory.GENERATIVE_LOGPROB]: - detail.gold = doc.get_golds() - detail.pred_logits = [o.logits for o in outputs] - pred_saved = True - if task.has_metric_category[MetricCategory.MULTICHOICE]: - detail.choices = doc.choices - detail.gold_index = as_list(doc.gold_index) - pred_saved = True - if task.has_metric_category[MetricCategory.MULTICHOICE_ONE_TOKEN]: - detail.choices = doc.choices - detail.gold_index = as_list(doc.gold_index) - pred_saved = True - if task.has_metric_category[MetricCategory.MULTICHOICE_PMI]: - detail.choices = doc.choices - detail.gold_index = as_list(doc.gold_index) - doc.specific = {**(doc.specific or {}), **{"unconditioned_query": doc.unconditioned_query}} - pred_saved = True - if ( - task.has_metric_category[MetricCategory.LLM_AS_JUDGE_MULTI_TURN] - or task.has_metric_category[MetricCategory.LLM_AS_JUDGE] - ): - detail.choices = doc.choices - detail.gold_index = as_list(doc.gold_index) - pred_saved = True - - detail.specifics = doc.specific - - if not pred_saved: - raise NotImplementedError( - "No metric prediction saved." - ) # We probably need to handle this case if we're here. 
- - detail.metrics = sanitize_numpy(metrics) + detail = self.Detail(doc, model_response, metrics) self.details[task_name].append(detail) hash = self.Hash() hash.example = xxhash.xxh64(doc.query).hexdigest() - hash.full_prompt = xxhash.xxh64(str(doc.ctx)).hexdigest() - hash.input_tokens = xxhash.xxh64(str([o.input_tokens for o in outputs])).hexdigest() - hash.cont_tokens = xxhash.xxh64(str([o.generated_tokens for o in outputs])).hexdigest() + hash.input_tokens = xxhash.xxh64(str(model_response.input_tokens)).hexdigest() + hash.cont_tokens = xxhash.xxh64(str(model_response.output_tokens)).hexdigest() self.hashes[task_name].append(hash) def aggregate(self): @@ -431,20 +346,8 @@ def aggregate(self): ).hexdigest() # hash of all the hash - sorted for reproducibility self.compiled_hashes[task_name] = compiled_hash - for task_name, task_examples in self.details.items(): + for task_name, _ in self.details.items(): self.compiled_details[task_name].hashes = asdict(self.compiled_hashes[task_name]) - self.compiled_details[task_name].truncated = sum(di > 0 for d in task_examples for di in d.truncated) - self.compiled_details[task_name].non_truncated = ( - len(task_examples) - self.compiled_details[task_name].truncated - ) - self.compiled_details[task_name].padded = sum(di > 0 for d in task_examples for di in d.padded) - self.compiled_details[task_name].non_padded = sum(di == 0 for d in task_examples for di in d.padded) - self.compiled_details[task_name].effective_few_shots = np.mean( - [d.num_effective_few_shots for d in task_examples] - ) - self.compiled_details[task_name].num_truncated_few_shots = sum( - d.num_effective_few_shots != d.num_asked_few_shots for d in task_examples - ) hash_types: list[str] = list(self.compiled_details.values())[0].hashes.keys() @@ -455,16 +358,6 @@ def aggregate(self): ) ).hexdigest() - self.compiled_details_over_all_tasks.truncated = sum(d.truncated for d in self.compiled_details.values()) - self.compiled_details_over_all_tasks.non_truncated = sum( - d.non_truncated for d in self.compiled_details.values() - ) - self.compiled_details_over_all_tasks.padded = sum(d.padded for d in self.compiled_details.values()) - self.compiled_details_over_all_tasks.non_padded = sum(d.non_padded for d in self.compiled_details.values()) - self.compiled_details_over_all_tasks.num_truncated_few_shots = sum( - d.num_truncated_few_shots for d in self.compiled_details.values() - ) - @dataclass class MetricsLogger: @@ -499,9 +392,7 @@ def aggregate(self, task_dict: dict[str, LightevalTask], bootstrap_iters: int = """ for task_name, metrics in self.metrics_values.items(): - cur_task_name, _ = task_name.rsplit("|", 1) - # fix the fact that we need the task_dict - task = task_dict[cur_task_name] + task = task_dict[task_name] skip_metric = [] for metric_name, metric_values in metrics.items(): @@ -600,7 +491,7 @@ class TaskConfigLogger: tasks_configs: dict[str, LightevalTaskConfig] = field(default_factory=dict) def log(self, task_dict: dict[str, LightevalTask]) -> None: - self.tasks_configs = {name: task.cfg for name, task in task_dict.items()} + self.tasks_configs = {name: task.config for name, task in task_dict.items()} def log_num_docs(self, task_name: str, original_num_docs: int, effective_num_docs: int) -> None: self.tasks_configs[task_name].original_num_docs = original_num_docs diff --git a/src/lighteval/main_accelerate.py b/src/lighteval/main_accelerate.py index af0b4b754..b1e28b9ce 100644 --- a/src/lighteval/main_accelerate.py +++ b/src/lighteval/main_accelerate.py @@ -45,15 +45,9 @@ def 
accelerate( # noqa C901 ], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = False, vision_model: Annotated[ bool, Option(help="Use vision model for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -137,8 +131,6 @@ def accelerate( # noqa C901 custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=use_chat_template, - system_prompt=system_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) @@ -149,8 +141,6 @@ def accelerate( # noqa C901 # We extract the model args config: dict = ModelConfig._parse_args(model_args) - config["use_chat_template"] = use_chat_template - if config.get("delta_weights", False): model_config = DeltaModelConfig(**config) elif config.get("adapter_weights", False): diff --git a/src/lighteval/main_baseline.py b/src/lighteval/main_baseline.py index 2dd970ea8..59a26c660 100644 --- a/src/lighteval/main_baseline.py +++ b/src/lighteval/main_baseline.py @@ -21,15 +21,12 @@ # SOFTWARE. -import os from typing import Optional from typer import Argument, Option from typing_extensions import Annotated -CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") - HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters" HELP_PANEL_NAME_3 = "Debug Parameters" @@ -38,9 +35,6 @@ def baseline( tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], - cache_dir: Annotated[ - str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) - ] = CACHE_DIR, custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, @@ -68,15 +62,16 @@ def baseline( This baseline computation may not be suitable for all task types and should be used with caution. 
""" from lighteval.logging.evaluation_tracker import EvaluationTracker - from lighteval.metrics.utils.metric_utils import MetricCategory from lighteval.models.abstract_model import ModelInfo - from lighteval.tasks.lighteval_task import LightevalTask - from lighteval.tasks.registry import Registry, taskinfo_selector + from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig + from lighteval.tasks.registry import Registry + from lighteval.tasks.requests import SamplingMethod from lighteval.utils.utils import as_list - task_registry = Registry(cache_dir=cache_dir, custom_tasks=custom_tasks) - task_names_list, fewshots_dict = taskinfo_selector(tasks, task_registry) - task_dict = task_registry.get_task_dict(task_names_list) + registry = Registry(custom_tasks=custom_tasks) + + task_configs: list[LightevalTaskConfig] = registry.get_tasks_configs(tasks) + tasks_dict: dict[str, LightevalTask] = registry.get_tasks_from_configs(task_configs) evaluation_tracker = EvaluationTracker( output_dir=output_dir, @@ -87,18 +82,19 @@ def baseline( hub_results_org=None, ) evaluation_tracker.general_config_logger.log_model_info( + {}, ModelInfo( model_name="lighteval/baseline", model_sha=None, model_dtype=None, model_size=None, - ) + ), ) - evaluation_tracker.task_config_logger.log(task_dict) + evaluation_tracker.task_config_logger.log(tasks_dict) - LightevalTask.load_datasets(list(task_dict.values()), dataset_loading_processes) + LightevalTask.load_datasets(tasks_dict, dataset_loading_processes) - for task_name, task in task_dict.items(): + for task_name, task in tasks_dict.items(): task_docs = list(task.eval_docs()) n_samples = min(max_samples, len(task_docs)) if max_samples else len(task_docs) @@ -107,15 +103,11 @@ def baseline( ] metric_results = { - metric.metric_name: p_correct_score - if metric.category - in [MetricCategory.MULTICHOICE, MetricCategory.MULTICHOICE_PMI, MetricCategory.MULTICHOICE_ONE_TOKEN] - else 0 + metric.metric_name: p_correct_score if metric.category in [SamplingMethod.LOGPROBS] else 0 for metric in task.metrics } - for fewshots, _ in fewshots_dict[task_name]: - evaluation_tracker.metrics_logger.log(f"{task_name}|{fewshots}", metric_results) + evaluation_tracker.metrics_logger.log(task_name, metric_results) - evaluation_tracker.metrics_logger.aggregate(task_dict=task_dict, bootstrap_iters=1000) + evaluation_tracker.metrics_logger.aggregate(task_dict=tasks_dict, bootstrap_iters=1000) evaluation_tracker.save() diff --git a/src/lighteval/main_custom.py b/src/lighteval/main_custom.py index 3ff3817c1..6cf4f2ae8 100644 --- a/src/lighteval/main_custom.py +++ b/src/lighteval/main_custom.py @@ -19,7 +19,6 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-import os from typing import Optional import typer @@ -32,9 +31,6 @@ app = typer.Typer() -TOKEN = os.getenv("HF_TOKEN") -CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") - HELP_PANNEL_NAME_1 = "Common Parameters" HELP_PANNEL_NAME_2 = "Logging Parameters" HELP_PANNEL_NAME_3 = "Debug Parameters" @@ -48,21 +44,12 @@ def custom( model_definition_file_path: Annotated[str, Argument(help="The model definition file path to evaluate")], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4) - ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANNEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANNEL_NAME_1) ] = 1, custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANNEL_NAME_1) ] = None, - cache_dir: Annotated[ - str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANNEL_NAME_1) - ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANNEL_NAME_1) ] = 1, @@ -126,8 +113,6 @@ def custom( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=use_chat_template, - system_prompt=system_prompt, ) pipeline = Pipeline( tasks=tasks, diff --git a/src/lighteval/main_endpoint.py b/src/lighteval/main_endpoint.py index 7f706abf0..ec5be08c9 100644 --- a/src/lighteval/main_endpoint.py +++ b/src/lighteval/main_endpoint.py @@ -50,12 +50,6 @@ def inference_endpoint( ), ] = False, # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -141,8 +135,6 @@ def inference_endpoint( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=use_chat_template, - system_prompt=system_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) pipeline = Pipeline( @@ -171,12 +163,6 @@ def tgi( ], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -266,8 +252,6 @@ def tgi( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=use_chat_template, - system_prompt=system_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) pipeline = Pipeline( @@ -299,9 +283,6 @@ def litellm( ], tasks: 
Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -394,8 +375,6 @@ def litellm( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=True, - system_prompt=system_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) pipeline = Pipeline( @@ -428,9 +407,6 @@ def inference_providers( ], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -482,7 +458,7 @@ def inference_providers( ] = 0, ): """ - Evaluate models using LiteLLM as backend. + Evaluate models using HuggingFace's inference providers as backend. """ from lighteval.logging.evaluation_tracker import EvaluationTracker @@ -502,14 +478,12 @@ def inference_providers( wandb=wandb, ) - # TODO (nathan): better handling of model_args parallelism_manager = ParallelismManager.NONE if model_args.endswith(".yaml"): model_config = InferenceProvidersModelConfig.from_path(model_args) else: - model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")} - model_config = InferenceProvidersModelConfig(**model_args_dict) + model_config = InferenceProvidersModelConfig.from_args(model_args) pipeline_params = PipelineParameters( launcher_type=parallelism_manager, @@ -518,8 +492,6 @@ def inference_providers( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=True, - system_prompt=system_prompt, load_responses_from_details_date_id=None, ) pipeline = Pipeline( diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 083e74f5a..a64bfcdb9 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -101,8 +101,6 @@ def nanotron( custom_tasks_directory=lighteval_config.tasks.custom_tasks, num_fewshot_seeds=1, max_samples=lighteval_config.tasks.max_samples, - use_chat_template=False, - system_prompt=None, ) pipeline = Pipeline( diff --git a/src/lighteval/main_sglang.py b/src/lighteval/main_sglang.py index 4edc5f3e5..13fe647ad 100644 --- a/src/lighteval/main_sglang.py +++ b/src/lighteval/main_sglang.py @@ -41,12 +41,6 @@ def sglang( ], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -127,8 +121,6 @@ def sglang( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - 
use_chat_template=use_chat_template, - system_prompt=system_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) diff --git a/src/lighteval/main_tasks.py b/src/lighteval/main_tasks.py index a22a4b321..196f92b73 100644 --- a/src/lighteval/main_tasks.py +++ b/src/lighteval/main_tasks.py @@ -20,7 +20,6 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging -import os from typing import Optional import typer @@ -29,7 +28,6 @@ app = typer.Typer() -CACHE_DIR = os.getenv("HF_HOME") @app.command() @@ -38,7 +36,6 @@ def inspect( custom_tasks: Annotated[Optional[str], Option(help="Path to a file with custom tasks")] = None, num_samples: Annotated[int, Option(help="Number of samples to display")] = 10, show_config: Annotated[bool, Option(help="Will display the full task config")] = False, - cache_dir: Annotated[Optional[str], Option(help="Cache directory used to store datasets and models")] = CACHE_DIR, ): """ Inspect a tasks @@ -50,7 +47,7 @@ def inspect( from lighteval.tasks.registry import Registry, taskinfo_selector - registry = Registry(cache_dir=cache_dir, custom_tasks=custom_tasks) + registry = Registry(custom_tasks=custom_tasks) # Loading task task_names_list, _ = taskinfo_selector(tasks, task_registry=registry) @@ -74,7 +71,7 @@ def list(custom_tasks: Annotated[Optional[str], Option(help="Path to a file with """ from lighteval.tasks.registry import Registry - registry = Registry(cache_dir=CACHE_DIR, custom_tasks=custom_tasks) + registry = Registry(custom_tasks=custom_tasks) registry.print_all_tasks() diff --git a/src/lighteval/main_vllm.py b/src/lighteval/main_vllm.py index b2eb26e15..907c4ace9 100644 --- a/src/lighteval/main_vllm.py +++ b/src/lighteval/main_vllm.py @@ -41,12 +41,6 @@ def vllm( ], tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], # === Common parameters === - use_chat_template: Annotated[ - bool, Option(help="Use chat template for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = False, - system_prompt: Annotated[ - Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, cot_prompt: Annotated[ Optional[str], Option(help="Use chain of thought prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) ] = None, @@ -130,8 +124,6 @@ def vllm( custom_tasks_directory=custom_tasks, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, - use_chat_template=use_chat_template, - system_prompt=system_prompt, cot_prompt=cot_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) diff --git a/src/lighteval/metrics/__init__.py b/src/lighteval/metrics/__init__.py index f3d51941a..2a4b6d4c2 100644 --- a/src/lighteval/metrics/__init__.py +++ b/src/lighteval/metrics/__init__.py @@ -21,221 +21,37 @@ # SOFTWARE. 
-from lighteval.metrics.metrics import Metric, MetricCategory +from lighteval.metrics.metrics import Metric from lighteval.models.model_output import ModelResponse from lighteval.tasks.requests import Doc -from lighteval.utils.utils import as_list -def apply_target_perplexity_metric( - sample_ids: list[str], responses: list[list[ModelResponse]], formatted_docs: list[Doc], metrics: list[Metric] -): +def apply_metric(responses: list[ModelResponse], docs: list[Doc], metrics: list[Metric]): outputs = [] + for metric in metrics: + if metric.batched_compute: + outputs_per_metrics: list = [] - for sample_id, results, formatted_doc in zip(sample_ids, responses, formatted_docs): - output = {} - - target_golds = formatted_doc.get_golds() - assert len(results) == len(target_golds), "You should return as many results as there are golds" - target_logprobs = [res.result[0] for res in results] - argmax_logits_eq_gold_list = [res.result[1] for res in results] - target_tokens = [res.generated_tokens for res in results] - - for metric in metrics: - if metric.category == MetricCategory.TARGET_PERPLEXITY: - output.update( - metric.compute( - logprobs=target_logprobs, - argmax_logits_eq_gold_list=argmax_logits_eq_gold_list, - reference_texts=target_golds, - target_tokens=target_tokens, - ) - ) - outputs.append(output) - - return outputs + outputs_per_metrics.append(metric.compute(responses=responses, docs=docs)) + # We merge the outputs per metric in a list of dict for each sample + # example: [{metric1_sample1, metric2_sample1}, {metric1_sample2, metric2_sample2}] + for i in range(len(docs)): + output = {} + for metric_outputs in outputs_per_metrics: + output.update(metric_outputs[i]) + outputs.append(output) -def apply_perplexity_metric( - sample_ids: list[str], responses: list[list[ModelResponse]], formatted_docs: list[Doc], metrics: list[Metric] -): - outputs = [] - for sample_id, results, formatted_doc in zip(sample_ids, responses, formatted_docs): - output = {} - if len(results) > 1: - raise Exception("You returned more than one result for a sample with a perplexity metric.") - results = results[0] - - # Sometimes, processing was added for the log processings - # that we don't want to include when computing the sentence length - # Check if we want to keep this or not - if formatted_doc.original_query not in [None, ""]: - reference_text = formatted_doc.original_query else: - reference_text = formatted_doc.query - - for metric in metrics: - if metric.category == MetricCategory.PERPLEXITY: - output.update(metric.compute(logprobs=[results.result], reference_texts=[reference_text])) - - outputs.append(output) - - return outputs - - -def apply_generative_metric( # noqa: C901 - sample_ids: list[str], - responses: list[list[ModelResponse]], - formatted_docs: list[Doc], - metrics: list[Metric], -): - outputs = [] - - for sample_id, results, formatted_doc in zip(sample_ids, responses, formatted_docs): - output = {} - - # Extracting gold - try: - golds = formatted_doc.get_golds() - except (KeyError, IndexError): - golds = None - - # Post processing prediction - if len(results) > 1: - # In case of sampling, it's a list of one list of n samples - raise Exception("You returned more than one result for a sample with a generative metric.") - results = results[0] - - # Post processing prediction - preds_raw = as_list(results.result) - preds = [] - - for pred_raw in preds_raw: - pred = pred_raw - preds.append(pred) - - for metric in metrics: - output.update( - metric.compute( - golds=golds, - predictions=preds, - 
formatted_doc=formatted_doc, - ) - ) - outputs.append(output) - - return outputs - - -def apply_multichoice_metric( - sample_ids: list[str], responses: list[list[ModelResponse]], formatted_docs: list[Doc], metrics: list[Metric] -): - outputs = [] - for sample_id, results, formatted_doc in zip(sample_ids, responses, formatted_docs): - output = {} - n_choices = len(formatted_doc.choices) - is_pmi_category = all(metric.category == MetricCategory.MULTICHOICE_PMI for metric in metrics) - - if n_choices <= 1: - raise ValueError( - "You can't use a multi choice metric with only one choice. Use `acc_golds_likelihood` instead." - ) - - if not is_pmi_category and len(results) != len(formatted_doc.choices): - raise Exception( - f"You shoud have returned as many model outputs as choices when using an multi choice metric. Returned {len(results)} instead of {len(formatted_doc.choices)}" - ) - - if is_pmi_category and len(results) != n_choices * 2: - raise Exception( - f"You shoud have returned twice as many model outputs as choices when using an probability multi choice metric. Returned {len(results)} instead of {n_choices * 2} (conditioned and unconditioned)" - ) - - mc_results = results[:n_choices] - # Todo: make better system with return_bool_score instead of taking first element - conditioned_lp = [res.result[0] for res in mc_results] - unconditioned_lp = None - if is_pmi_category: - unconditioned_lp = [res.result[0] for res in results[n_choices : n_choices * 2]] - - gold_ixs = as_list(formatted_doc.gold_index) - choices_tokens = [res.generated_tokens for res in mc_results] - - for metric in metrics: - if metric.category == MetricCategory.MULTICHOICE_PMI or metric.category == MetricCategory.MULTICHOICE: - output.update( - metric.compute( - gold_ixs=gold_ixs, - choices_logprob=conditioned_lp, - unconditioned_logprob=unconditioned_lp, - choices_tokens=choices_tokens, - formatted_doc=formatted_doc, + for model_response, doc in zip(responses, docs): + output = {} + for metric in metrics: + output.update( + metric.compute( + model_response=model_response, + doc=doc, + ) ) - ) - outputs.append(output) - - return outputs - - -def apply_multichoice_metric_one_token( - sample_ids: list[str], responses: list[list[ModelResponse]], formatted_docs: list[Doc], metrics: list[Metric] -): - outputs = [] - - for sample_id, results, formatted_doc in zip(sample_ids, responses, formatted_docs): - output = {} - - if len(results) > 1: - raise Exception( - "You returned more than one result for a sample with a gmultichoice metric on only one token." - ) - results = results[0] - choices_logprob = results.result - choices_texts = formatted_doc.choices - gold_ixs = as_list(formatted_doc.gold_index) - - for metric in metrics: - if metric.category == MetricCategory.MULTICHOICE_ONE_TOKEN: - output.update( - metric.compute( - choices_logprob=choices_logprob, - # Neither token or PMI are supported for this metric - unconditioned_logprob=None, - choices_tokens=None, - choices_texts=choices_texts, - gold_ixs=gold_ixs, - formatted_doc=formatted_doc, - ) - ) - - outputs.append(output) - - return outputs - - -def apply_llm_as_judge_metric( - sample_ids: list[str], responses: list[list[ModelResponse]], formatted_docs: list[Doc], metrics: list[Metric] -): - """ - Apply the LLM as judge metric to the responses. The batching is managed at the judge level. 
- """ - # outputs per metric is a list containing a list of dict for each metric - # example: [[{metric1_sample1}, {metric1_sample2}], [{metric2_sample1}, {metric2_sample2}]] - outputs_per_metrics: list[list[dict]] = [] - - for metric in metrics: - if metric.category in [MetricCategory.LLM_AS_JUDGE_MULTI_TURN, MetricCategory.LLM_AS_JUDGE]: - outputs_per_metrics.append( - metric.compute(sample_ids=sample_ids, responses=responses, formatted_docs=formatted_docs) - ) - - # We merge the outputs per metric in a list of dict for each sample - # example: [{metric1_sample1, metric2_sample1}, {metric1_sample2, metric2_sample2}] - outputs = [] - for i in range(len(sample_ids)): - output = {} - for metric_outputs in outputs_per_metrics: - output.update(metric_outputs[i]) - outputs.append(output) + outputs.append(output) return outputs diff --git a/src/lighteval/metrics/dynamic_metrics.py b/src/lighteval/metrics/dynamic_metrics.py index 3e0b45121..29659ae20 100644 --- a/src/lighteval/metrics/dynamic_metrics.py +++ b/src/lighteval/metrics/dynamic_metrics.py @@ -34,7 +34,6 @@ ) from lighteval.metrics.normalizations import ( LogProbNormalization, - LogProbPMINorm, LogProbTokenNorm, get_multilingual_normalizer, ) @@ -47,8 +46,9 @@ get_extraction_regexes, ) from lighteval.metrics.utils.math_comparison import compare_gold_target -from lighteval.metrics.utils.metric_utils import MetricCategory, MetricUseCase, SampleLevelMetric -from lighteval.tasks.requests import Doc +from lighteval.metrics.utils.metric_utils import SampleLevelMetric +from lighteval.models.model_output import ModelResponse +from lighteval.tasks.requests import Doc, SamplingMethod from lighteval.utils.language import Language from lighteval.utils.timeout import timeout @@ -66,10 +66,7 @@ def loglikelihood_acc_metric(normalization: LogProbNormalization | None = None) return SampleLevelMetric( metric_name=metric_name, sample_level_fn=LoglikelihoodAcc(logprob_normalization=normalization).compute, - category=MetricCategory.MULTICHOICE - if not normalization == LogProbPMINorm() - else MetricCategory.MULTICHOICE_PMI, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -91,10 +88,7 @@ def normalized_multi_choice_prob_metric( sample_level_fn=NormalizedMultiChoiceProbability( log_prob_normalization=normalization, aggregation_function=aggregation_function ).compute, - category=MetricCategory.MULTICHOICE - if not normalization == LogProbPMINorm() - else MetricCategory.MULTICHOICE_PMI, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -114,8 +108,7 @@ def probability_metric( return SampleLevelMetric( metric_name=metric_name, sample_level_fn=Probability(normalization=normalization, aggregation_function=aggregation_function).compute, - category=MetricCategory.TARGET_PERPLEXITY, - use_case=MetricUseCase.PERPLEXITY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -144,8 +137,7 @@ def multilingual_quasi_f1_score_metric( normalize_pred=multilang_normalizer, aggregation_function=aggregation_function, ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -178,8 +170,7 @@ def multilingual_quasi_exact_match_metric( aggregation_function=aggregation_function, type_exact_match=match_type, ).compute, - category=MetricCategory.GENERATIVE, - 
use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -243,9 +234,12 @@ def add_to_specifics_with_timeout( ] formatted_doc.specific["extracted_golds"] = [str(gold) for golds in extracted_golds for gold in golds] - def sample_level_fn(golds: list[str], predictions: list[str], formatted_doc: Doc) -> float: - gold_extraction_regexes = get_extraction_regexes(formatted_doc, gold_extraction_target, language) - pred_extraction_regexes = get_extraction_regexes(formatted_doc, pred_extraction_target, language) + def sample_level_fn(doc: Doc, model_response: ModelResponse) -> float: + golds = doc.get_golds() + predictions = model_response.text + + gold_extraction_regexes = get_extraction_regexes(doc, gold_extraction_target, language) + pred_extraction_regexes = get_extraction_regexes(doc, pred_extraction_target, language) extracted_predictions = [ extract_target_from_pred(pred, pred_extraction_regexes, fallback_mode, extraction_mode, timeout_seconds) @@ -268,7 +262,7 @@ def sample_level_fn(golds: list[str], predictions: list[str], formatted_doc: Doc # We have to use timeout because the sypmy to str conversion can be very slow try: - add_to_specifics_with_timeout(formatted_doc, extracted_predictions, extracted_golds) + add_to_specifics_with_timeout(doc, extracted_predictions, extracted_golds) except Exception: # noqa: E722 logger.warning("Timeout when adding extracted predictions and golds to specific") @@ -289,8 +283,7 @@ def sample_level_fn(golds: list[str], predictions: list[str], formatted_doc: Doc return SampleLevelMetric( metric_name="extractive_match", sample_level_fn=sample_level_fn, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) diff --git a/src/lighteval/metrics/harness_compatibility/drop.py b/src/lighteval/metrics/harness_compatibility/drop.py index d6c8ac30b..f12828cbe 100644 --- a/src/lighteval/metrics/harness_compatibility/drop.py +++ b/src/lighteval/metrics/harness_compatibility/drop.py @@ -27,8 +27,11 @@ import numpy as np from scipy.optimize import linear_sum_assignment +from lighteval.models.model_output import ModelResponse +from lighteval.tasks.requests import Doc -def drop_metrics(predictions: list[str], formatted_doc, **kwargs): # noqa: C901 + +def drop_metrics(doc: Doc, model_response: ModelResponse): # noqa: C901 """F1 score from bag of words: comes from Harness Drop. DROP offers two metrics, a quasi exact match and a numeracy-focused F1 score. 
Quasi in the sense that it does some normalizations before matching and numeracy-focused in the sense that @@ -161,8 +164,8 @@ def _normalize(answer: str): max_em = 0 max_f1 = 0 - for gold_answer in formatted_doc.specific["golds_no_preprocessing"]: - exact_match, f1_score = _get_metrics(predictions, gold_answer) + for gold_answer in doc.specific["golds_no_preprocessing"]: + exact_match, f1_score = _get_metrics(model_response.text, gold_answer) if isinstance(gold_answer, list): gold_answer = gold_answer[0] if gold_answer.strip(): diff --git a/src/lighteval/metrics/harness_compatibility/truthful_qa.py b/src/lighteval/metrics/harness_compatibility/truthful_qa.py index e4b42ee50..771077222 100644 --- a/src/lighteval/metrics/harness_compatibility/truthful_qa.py +++ b/src/lighteval/metrics/harness_compatibility/truthful_qa.py @@ -22,17 +22,13 @@ import numpy as np +from lighteval.models.model_output import ModelResponse from lighteval.tasks.requests import Doc +from lighteval.utils.utils import as_list # Comes from the harness -def truthfulqa_mc_metrics( - gold_ixs: list[int], - choices_logprob: list[float], - unconditioned_logprob: list[float] | None, - choices_tokens: list[list[int]] | None, - formatted_doc: Doc, -): +def truthfulqa_mc_metrics(doc: Doc, model_response: ModelResponse): def mc1(lls): # The gold answers in `mc1_targets` are always first (index = `0`). return np.argmax(lls) == 0 @@ -43,12 +39,15 @@ def mc2(lls, split_idx): p_true = p_true / (sum(p_true) + sum(p_false)) return sum(p_true) + gold_ixs = as_list(doc.gold_index) + choices_logprob = model_response.logprobs + # The harness assumes that all items are gold before the last one, but that is not always the case # For gold ix 5, 6, 8, the harness will look at the first "gap" (7) and consider that the following # items are not gold (even though here, 8 is gold). Example at item 371 of the dataset. # This is broken and will have to be fixed once we OSS this, by actually separating # gold and not gold items for mc2 computations - len_mc1 = formatted_doc.specific["len_mc1"] + len_mc1 = doc.specific["len_mc1"] last_harness_gold = gold_ixs[1] - 1 # fake value to init the loop for g in gold_ixs[1:]: # we ignore the first item, which is the gold for mc1 if last_harness_gold == g - 1: diff --git a/src/lighteval/metrics/metrics.py b/src/lighteval/metrics/metrics.py index efc762dec..23d97a076 100644 --- a/src/lighteval/metrics/metrics.py +++ b/src/lighteval/metrics/metrics.py @@ -20,6 +20,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
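For orientation, here is a minimal sketch (not taken from this patch) of what a sample-level metric looks like against the refactored interface: the metric declares a SamplingMethod instead of a MetricCategory/MetricUseCase pair, and its scoring function receives the whole Doc and ModelResponse rather than pre-extracted golds and predictions. The metric name and scoring logic below are made up for illustration; only the imports, constructor fields, and Doc/ModelResponse accessors come from this diff.

# Minimal sketch of a custom sample-level metric under the refactored interface.
import numpy as np

from lighteval.metrics.utils.metric_utils import SampleLevelMetric
from lighteval.models.model_output import ModelResponse
from lighteval.tasks.requests import Doc, SamplingMethod


def contains_gold(doc: Doc, model_response: ModelResponse, **kwargs) -> float:
    # Golds now come from the Doc, predictions from the ModelResponse.
    golds = doc.get_golds()
    prediction = model_response.text[0]
    return float(any(gold in prediction for gold in golds))


contains_gold_metric = SampleLevelMetric(
    metric_name="contains_gold",         # hypothetical name, for illustration only
    sample_level_fn=contains_gold,
    category=SamplingMethod.GENERATIVE,  # SamplingMethod replaces MetricCategory + MetricUseCase
    corpus_level_fn=np.mean,
    higher_is_better=True,
)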
+from typing import Callable + import numpy as np from aenum import Enum @@ -27,9 +29,6 @@ ExprExtractionConfig, IndicesExtractionConfig, LatexExtractionConfig, - compare_gold_target, - extract_target_from_pred, - get_extraction_regexes, multilingual_extractive_match_metric, ) from lighteval.metrics.harness_compatibility.drop import drop_metrics @@ -69,16 +68,20 @@ remove_braces, remove_braces_and_strip, ) -from lighteval.metrics.sample_preparator import GenerativePreparator, LoglikelihoodPreparator, PerplexityPreparator +from lighteval.metrics.sample_preparator import ( + GenerativePreparator, + LoglikelihoodPreparator, + PerplexityPreparator, + TargetPerplexityPreparator, +) from lighteval.metrics.utils.metric_utils import ( CorpusLevelMetric, CorpusLevelMetricGrouping, Metric, - MetricCategory, MetricGrouping, - MetricUseCase, SampleLevelMetric, SampleLevelMetricGrouping, + SamplingMethod, ) from lighteval.utils.language import Language from lighteval.utils.utils import as_list @@ -88,48 +91,42 @@ class Metrics(Enum): acc_golds_likelihood = SampleLevelMetric( # todo: we need a better name for this! metric_name="acc", sample_level_fn=acc_golds_likelihood, - category=MetricCategory.TARGET_PERPLEXITY, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) bert_score = SampleLevelMetricGrouping( metric_name=["BERTScore-P", "BERTScore-R", "BERTScore-F"], sample_level_fn=BertScore(normalize_gold=remove_braces, normalize_pred=remove_braces_and_strip).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn={"BERTScore-P": np.mean, "BERTScore-R": np.mean, "BERTScore-F": np.mean}, higher_is_better={"BERTScore-P": True, "BERTScore-R": True, "BERTScore-F": True}, ) bits_per_byte = CorpusLevelMetric( metric_name="bits_per_byte", sample_level_fn=PerplexityPreparator(units_type="bytes").prepare, - category=MetricCategory.PERPLEXITY, - use_case=MetricUseCase.PERPLEXITY, + category=SamplingMethod.PERPLEXITY, corpus_level_fn=CorpusLevelPerplexityMetric("bits_per_byte").compute, higher_is_better=False, ) bleu = CorpusLevelMetric( metric_name="bleu", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelTranslationMetric("bleu").compute, higher_is_better=True, ) bleu_1 = SampleLevelMetric( metric_name="bleu_1", sample_level_fn=BLEU(n_gram=1).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) bleu_4 = SampleLevelMetric( metric_name="bleu_4", sample_level_fn=BLEU(n_gram=4).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -137,32 +134,28 @@ class Metrics(Enum): bleurt = SampleLevelMetric( metric_name="bleurt", sample_level_fn=BLEURT().compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) byte_perplexity = CorpusLevelMetric( metric_name="byte_perplexity", sample_level_fn=PerplexityPreparator(units_type="bytes").prepare, - category=MetricCategory.PERPLEXITY, - use_case=MetricUseCase.PERPLEXITY, + category=SamplingMethod.PERPLEXITY, 
corpus_level_fn=CorpusLevelPerplexityMetric("weighted_perplexity").compute, higher_is_better=False, ) chrf = CorpusLevelMetric( metric_name="chrf", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelTranslationMetric("chrf").compute, higher_is_better=True, ) chrf_plus = CorpusLevelMetric( metric_name="chrf++", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelTranslationMetric("chrf++").compute, higher_is_better=True, ) @@ -171,24 +164,21 @@ class Metrics(Enum): sample_level_fn=StringDistance( metric_types=["longest_common_prefix_length", "edit_distance", "edit_similarity"], strip_prediction=True ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SOCIAL_IMPACTS, + category=SamplingMethod.GENERATIVE, corpus_level_fn={"longest_common_prefix_length": max, "edit_distance": min, "edit_similarity": max}, higher_is_better={"longest_common_prefix_length": True, "edit_distance": False, "edit_similarity": True}, ) drop = SampleLevelMetricGrouping( metric_name=["qem", "f1"], sample_level_fn=drop_metrics, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn={"qem": max, "f1": max}, higher_is_better={"qem": True, "f1": True}, ) exact_match = SampleLevelMetric( metric_name="em", sample_level_fn=ExactMatches(strip_strings=True).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -206,8 +196,7 @@ class Metrics(Enum): sample_level_fn=Extractiveness( normalize_input=remove_braces, normalize_pred=remove_braces_and_strip, input_column="text" ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn={ "summarization_coverage": np.mean, "summarization_density": np.mean, @@ -222,32 +211,28 @@ class Metrics(Enum): f1_score_quasi = SampleLevelMetric( metric_name="f1_score_quasi", sample_level_fn=F1_score(normalize_gold=helm_normalizer, normalize_pred=helm_normalizer).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) f1_score = SampleLevelMetric( metric_name="f1", sample_level_fn=F1_score().compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) f1_score_macro = CorpusLevelMetric( metric_name="f1", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelF1Score(average="macro").compute, higher_is_better=True, ) f1_score_micro = CorpusLevelMetric( metric_name="f1", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelF1Score(average="micro").compute, higher_is_better=True, ) @@ -256,8 +241,7 @@ class Metrics(Enum): sample_level_fn=Faithfulness( normalize_input=remove_braces, normalize_pred=remove_braces_and_strip, input_column="text" ).compute, - 
category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -273,72 +257,63 @@ class Metrics(Enum): loglikelihood_acc = SampleLevelMetric( metric_name="acc", sample_level_fn=LoglikelihoodAcc(logprob_normalization=None).compute, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) loglikelihood_acc_norm = SampleLevelMetric( metric_name="acc_norm", sample_level_fn=LoglikelihoodAcc(logprob_normalization=LogProbCharNorm()).compute, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) loglikelihood_acc_norm_nospace = SampleLevelMetric( metric_name="acc_norm", sample_level_fn=LoglikelihoodAcc(logprob_normalization=LogProbCharNorm(ignore_first_space=True)).compute, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) loglikelihood_acc_norm_single_token = SampleLevelMetric( metric_name="acc_norm", sample_level_fn=LoglikelihoodAcc(logprob_normalization=LogProbCharNorm()).compute, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) loglikelihood_acc_single_token = SampleLevelMetric( metric_name="acc", sample_level_fn=LoglikelihoodAcc(logprob_normalization=None).compute, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) loglikelihood_f1 = CorpusLevelMetric( metric_name="loglikelihood_f1", sample_level_fn=LoglikelihoodPreparator().prepare, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=CorpusLevelF1Score(None).compute, higher_is_better=True, ) loglikelihood_f1_single_token = CorpusLevelMetric( metric_name="loglikelihood_f1", sample_level_fn=LoglikelihoodPreparator(is_single_token=True).prepare, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=CorpusLevelF1Score(None).compute, higher_is_better=True, ) mcc = CorpusLevelMetric( metric_name="mcc", sample_level_fn=LoglikelihoodPreparator().prepare, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=matthews_corrcoef, higher_is_better=True, ) mcc_single_token = CorpusLevelMetric( metric_name="mcc", sample_level_fn=LoglikelihoodPreparator().prepare, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=matthews_corrcoef, higher_is_better=True, ) @@ -347,24 +322,21 @@ class Metrics(Enum): sample_level_fn=MajAtK( k=4, strip_strings=True, normalize_pred=math_normalizer, normalize_gold=math_normalizer ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.MATH, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) maj_at_5 = SampleLevelMetric( metric_name="maj@5", sample_level_fn=MajAtK(k=5).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, 
corpus_level_fn=np.mean, higher_is_better=True, ) maj_at_8 = SampleLevelMetric( metric_name="maj@8", sample_level_fn=MajAtK(k=8).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -373,8 +345,7 @@ class Metrics(Enum): sample_level_fn=MajAtK( k=8, strip_strings=True, normalize_pred=gsm8k_normalizer, normalize_gold=gsm8k_normalizer ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.MATH, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -385,28 +356,14 @@ class Metrics(Enum): n=1, strip_strings=True, # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -416,29 +373,14 @@ class Metrics(Enum): k=1, n=4, strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -449,28 +391,14 @@ class Metrics(Enum): n=8, strip_strings=True, # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - 
formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -480,29 +408,14 @@ class Metrics(Enum): k=1, n=16, strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -512,29 +425,14 @@ class Metrics(Enum): k=1, n=32, strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -544,29 +442,14 @@ class Metrics(Enum): k=1, n=64, strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - 
normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparison - sample_scoring_function=compare_gold_target, + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( + language=Language.ENGLISH, + gold_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + pred_extraction_target=[ExprExtractionConfig(), LatexExtractionConfig()], + precision=6, + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -574,74 +457,66 @@ class Metrics(Enum): mrr = SampleLevelMetric( metric_name="mrr", sample_level_fn=MRR().compute, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) mrr_single_token = SampleLevelMetric( metric_name="mrr", sample_level_fn=mrr, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) multi_f1_numeric = CorpusLevelMetric( metric_name="mf1", sample_level_fn=LoglikelihoodPreparator(is_single_token=True).prepare, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=CorpusLevelF1Score(average=None, num_classes=3).compute, higher_is_better=True, ) pass_at_1 = SampleLevelMetric( metric_name="pass@1:32_samples", sample_level_fn=PassAtK(k=1, n=32, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) pass_at_10 = SampleLevelMetric( metric_name="pass@10:32_samples", sample_level_fn=PassAtK(k=10, n=32, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) pass_at_100 = SampleLevelMetric( metric_name="pass@100:32_samples", sample_level_fn=PassAtK(k=100, n=32, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) g_pass_at_16 = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", + metric_name=["G-Pass@16:48_samples"], sample_level_fn=GPassAtK(k=16, n=48, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, np.mean), higher_is_better=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, True), ) g_pass_at_8_16 = SampleLevelMetricGrouping( - metric_name="G-Pass@8-16:48_samples", + metric_name=["G-Pass@8-16:48_samples"], sample_level_fn=GPassAtK(k=[8, 16], n=48, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, np.mean), higher_is_better=dict.fromkeys(GPassAtK(k=16, n=48, 
strip_strings=True).all_metrics, True), ) g_pass_at_16_expr_gold = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", + metric_name=["G-Pass@16:48_samples"], sample_level_fn=GPassAtK( k=16, n=48, strip_strings=True, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( language=Language.ENGLISH, fallback_mode="first_match", precision=5, @@ -649,20 +524,19 @@ class Metrics(Enum): # Match boxed first before trying other regexes pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)), aggregation_function=max, - ).sample_level_fn([ref], [pred], doc), + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, np.mean), higher_is_better=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, True), ) g_pass_at_16_latex_gold = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", + metric_name=["G-Pass@16:48_samples"], sample_level_fn=GPassAtK( k=16, n=48, strip_strings=True, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( language=Language.ENGLISH, fallback_mode="first_match", precision=5, @@ -670,34 +544,30 @@ class Metrics(Enum): # Match boxed first before trying other regexes pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)), aggregation_function=max, - ).sample_level_fn([ref], [pred], doc), + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, np.mean), higher_is_better=dict.fromkeys(GPassAtK(k=16, n=48, strip_strings=True).all_metrics, True), ) perfect_exact_match = SampleLevelMetric( metric_name="perfect_em", sample_level_fn=ExactMatches().compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) prediction_perplexity = SampleLevelMetric( metric_name="ppl", sample_level_fn=None, # todo!!! 
- category=MetricCategory.IGNORED, - use_case=MetricUseCase.PERPLEXITY, + category=SamplingMethod.PERPLEXITY, corpus_level_fn=CorpusLevelPerplexityMetric("perplexity").compute, higher_is_better=True, ) prefix_exact_match = SampleLevelMetric( metric_name="pem", sample_level_fn=ExactMatches(strip_strings=True, type_exact_match="prefix").compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -708,8 +578,7 @@ class Metrics(Enum): normalize_pred=helm_normalizer, type_exact_match="prefix", ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -720,8 +589,7 @@ class Metrics(Enum): normalize_pred=helm_normalizer, strip_strings=True, ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -730,16 +598,14 @@ class Metrics(Enum): sample_level_fn=ExactMatches( strip_strings=True, normalize_pred=math_normalizer, normalize_gold=math_normalizer ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.MATH, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) quasi_exact_match_triviaqa = SampleLevelMetric( metric_name="qem", sample_level_fn=ExactMatches(strip_strings=True, normalize_pred=harness_triviaqa_normalizer).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -748,40 +614,35 @@ class Metrics(Enum): sample_level_fn=ExactMatches( strip_strings=True, normalize_pred=gsm8k_normalizer, normalize_gold=gsm8k_normalizer ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.MATH, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) recall_at_1_single_token = SampleLevelMetric( metric_name="acc", sample_level_fn=Recall(at=1).compute, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) recall_at_2_single_token = SampleLevelMetric( metric_name="recall@2", sample_level_fn=Recall(at=2).compute, - category=MetricCategory.MULTICHOICE_ONE_TOKEN, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) recall_at_1 = SampleLevelMetric( metric_name="acc", sample_level_fn=Recall(at=1), - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) recall_at_2 = SampleLevelMetric( metric_name="recall@2", sample_level_fn=Recall(at=2), - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -793,82 +654,73 @@ class Metrics(Enum): normalize_gold=bigbench_normalizer, normalize_pred=bigbench_normalizer, ).compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, corpus_level_fn={"rouge1": np.mean, "rouge2": np.mean, "rougeL": np.mean, "rougeLsum": np.mean}, higher_is_better={"rouge1": True, "rouge2": True, "rougeL": True, "rougeLsum": True}, ) rouge1 = SampleLevelMetric( metric_name="rouge1", 
sample_level_fn=ROUGE("rouge1").compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) rouge2 = SampleLevelMetric( metric_name="rouge2", sample_level_fn=ROUGE("rouge2").compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) rougeL = SampleLevelMetric( metric_name="rougeL", sample_level_fn=ROUGE("rougeL").compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) rougeLsum = SampleLevelMetric( metric_name="rougeLsum", sample_level_fn=ROUGE("rougeLsum").compute, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) simpleqa_judge = SampleLevelMetricGrouping( metric_name=["simpleqa_judge"], higher_is_better={"simpleqa_judge": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMSimpleQA().compute, + batched_compute=True, corpus_level_fn={ "simpleqa_judge": np.mean, }, ) target_perplexity = SampleLevelMetric( metric_name="ppl", - sample_level_fn=PerplexityPreparator(units_type="words").prepare, - category=MetricCategory.TARGET_PERPLEXITY, - use_case=MetricUseCase.PERPLEXITY, + sample_level_fn=TargetPerplexityPreparator(units_type="words").prepare, + category=SamplingMethod.LOGPROBS, corpus_level_fn=CorpusLevelPerplexityMetric("perplexity").compute, higher_is_better=False, ) ter = CorpusLevelMetric( metric_name="ter", sample_level_fn=GenerativePreparator().prepare, - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.TRANSLATION, + category=SamplingMethod.GENERATIVE, corpus_level_fn=CorpusLevelTranslationMetric("ter").compute, higher_is_better=False, ) truthfulqa_mc_metrics = SampleLevelMetricGrouping( metric_name=["truthfulqa_mc1", "truthfulqa_mc2"], sample_level_fn=truthfulqa_mc_metrics, - category=MetricCategory.MULTICHOICE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.LOGPROBS, corpus_level_fn={"truthfulqa_mc1": np.mean, "truthfulqa_mc2": np.mean}, higher_is_better={"truthfulqa_mc1": True, "truthfulqa_mc2": True}, ) word_perplexity = CorpusLevelMetric( metric_name="word_perplexity", sample_level_fn=PerplexityPreparator(units_type="words").prepare, - category=MetricCategory.PERPLEXITY, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.PERPLEXITY, corpus_level_fn=CorpusLevelPerplexityMetric("weighted_perplexity").compute, higher_is_better=False, ) @@ -883,15 +735,14 @@ class Metrics(Enum): sample_level_fn=PassAtK( k=1, n=1, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( language=Language.ENGLISH, gold_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], pred_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], precision=6, - ).sample_level_fn([ref], [pred], doc), + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -900,15 +751,14 @@ 
class Metrics(Enum): sample_level_fn=PassAtK( k=1, n=4, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( language=Language.ENGLISH, gold_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], pred_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], precision=6, - ).sample_level_fn([ref], [pred], doc), + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -917,15 +767,14 @@ class Metrics(Enum): sample_level_fn=PassAtK( k=1, n=8, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( + sample_scoring_function=lambda doc, model_response: multilingual_extractive_match_metric( language=Language.ENGLISH, gold_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], pred_extraction_target=[IndicesExtractionConfig(prefix_for_extraction="NativeLetters")], precision=6, - ).sample_level_fn([ref], [pred], doc), + ).sample_level_fn(doc, model_response), ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, corpus_level_fn=np.mean, higher_is_better=True, ) @@ -937,8 +786,6 @@ def __str__(self): def higher_is_better(): res = {} for metric in Metrics: - if metric.value.category == MetricCategory.IGNORED: - continue if isinstance(metric.value, MetricGrouping): res.update(metric.value.higher_is_better) else: @@ -946,11 +793,9 @@ def higher_is_better(): return res @staticmethod - def corpus_level_fns(metrics: list[Metric]) -> dict[str, callable]: + def corpus_level_fns(metrics: list[Metric]) -> dict[str, Callable]: res = {} for metric in metrics: - if metric.category == MetricCategory.IGNORED: - continue if isinstance(metric, MetricGrouping): if isinstance(metric.corpus_level_fn, dict): res.update(metric.corpus_level_fn) @@ -966,7 +811,5 @@ def corpus_level_fns(metrics: list[Metric]) -> dict[str, callable]: def all_metrics(): res = [] for metric in Metrics: - if metric.value.category == MetricCategory.IGNORED: - continue res.extend(as_list(metric.value.metric_name)) return res diff --git a/src/lighteval/metrics/metrics_sample.py b/src/lighteval/metrics/metrics_sample.py index 6d3d2ae81..872897290 100644 --- a/src/lighteval/metrics/metrics_sample.py +++ b/src/lighteval/metrics/metrics_sample.py @@ -51,6 +51,7 @@ remove_braces_and_strip, ) from lighteval.metrics.utils.judge_utils import get_judge_prompt_simpleqa, process_judge_response_simpleqa +from lighteval.models.model_output import ModelResponse from lighteval.tasks.requests import Doc from lighteval.utils.utils import as_list, safe_divide @@ -95,7 +96,7 @@ def __init__( ) self.type_exact_match = type_exact_match - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> float: """Computes the metric over a list of golds and predictions for one single sample. 
Args: @@ -107,8 +108,9 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: """ results = [] # We might need to flatten golds if they are a list of lists + golds = doc.get_golds() for gold in golds: - for pred in predictions: + for pred in model_response.text: results.append(self.compute_one_item(gold=gold, pred=pred)) return self.aggregation_function(results) @@ -172,7 +174,7 @@ def __init__( self.normalize_pred = normalize_pred self.strip_strings = strip_strings - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> float: """Computes the metric over a list of golds and predictions for one single sample. Args: @@ -183,6 +185,8 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: float: Aggregated score over the current sample's items. """ results = [] + golds = doc.get_golds() + predictions = model_response.text # We might need to flatten golds if they are a list of lists for gold in golds: for pred in predictions: @@ -228,11 +232,8 @@ def __init__(self, logprob_normalization: LogProbNormalization | None = None): # Solve the choices token lengths properly def compute( self, - gold_ixs: list[int], - choices_logprob: list[float], - unconditioned_logprob: list[float] | None, - choices_tokens: list[list[int]] | None, - formatted_doc: Doc, + doc: Doc, + model_response: ModelResponse, **kwargs, ) -> int: """Computes the log likelihood accuracy: is the choice with the highest logprob in `choices_logprob` present @@ -249,17 +250,26 @@ def compute( Returns: int: The eval score: 1 if the best log-prob choice is in gold, 0 otherwise. """ + n_choices = len(doc.choices) + choices_logprobs = model_response.logprobs[:n_choices] + unconditioned_logprobs = None + + if len(model_response.logprobs) == n_choices * 2: + unconditioned_logprobs = model_response.logprobs[n_choices : n_choices * 2] + + gold_ixs = as_list(doc.gold_index) + choices_tokens = model_response.output_tokens[:n_choices] normalized_log_probs = ( normalize_log_probs( self.logprob_normalization, - choices_logprob, - unconditioned_logprob, - formatted_doc.choices, + choices_logprobs, + unconditioned_logprobs, + doc.choices, choices_tokens, ) if self.logprob_normalization - else choices_logprob + else choices_logprobs ) best_choice = np.argmax(normalized_log_probs) @@ -284,11 +294,8 @@ def __init__( def compute( self, - gold_ixs: list[int], - choices_logprob: list[float], - unconditioned_logprob: list[float] | None, - choices_tokens: list[list[int]] | None, - formatted_doc: Doc, + doc: Doc, + model_response: ModelResponse, **kwargs, ) -> float: """Computes the log likelihood probability: chance of choosing the best choice. @@ -304,17 +311,26 @@ def compute( Returns: float: The probability of the best log-prob choice being a gold choice. 
""" + n_choices = len(doc.choices) + choices_logprobs = model_response.logprobs[:n_choices] + unconditioned_logprobs = None + + if len(model_response.logprobs) == n_choices * 2: + unconditioned_logprobs = model_response.logprobs[n_choices : n_choices * 2] + + gold_ixs = as_list(doc.gold_index) + choices_tokens = model_response.output_tokens[:n_choices] normalized_log_probs = ( normalize_log_probs( self.log_prob_normalization, - choices_logprob, - unconditioned_logprob, - formatted_doc.choices, + choices_logprobs, + unconditioned_logprobs, + doc.choices, choices_tokens, ) if self.log_prob_normalization - else choices_logprob + else choices_logprobs ) normalized_probs = np.exp(normalized_log_probs) @@ -341,9 +357,8 @@ def __init__( def compute( self, - logprobs: list[float], - target_tokens: list[list[int]] | None = None, - reference_texts: list[str] | None = None, + doc: Doc, + model_response: ModelResponse, **kwargs, ) -> float: """Computes the log likelihood probability: chance of choosing the best choice. @@ -358,11 +373,14 @@ def compute( Returns: float: The probability of the best log-prob choice being a gold choice. """ + choices_tokens = model_response.output_tokens + logprobs = model_response.logprobs + reference_texts = doc.choices normalized_log_probs = ( normalize_log_probs( normalization=self.log_prob_normalization, - choices_tokens=target_tokens, + choices_tokens=choices_tokens, choices_logprob=logprobs, choices_text=reference_texts, unconditioned_logprob=None, @@ -384,7 +402,7 @@ def __init__(self, at: int) -> None: """ self.recall_depth = at - def compute(self, choices_logprob: list[float], gold_ixs: list[int], **kwargs) -> int: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> int: """Computes the recall at the requested depth level: looks at the `n` best predicted choices (with the highest log probabilities) and see if there is an actual gold among them. @@ -395,9 +413,12 @@ def compute(self, choices_logprob: list[float], gold_ixs: list[int], **kwargs) - Returns: int: Score: 1 if one of the top level predicted choices was correct, 0 otherwise. """ + choices_logprobs = model_response.logprobs + gold_ixs = as_list(doc.gold_index) + if self.recall_depth == 1: - return int(np.argmax(choices_logprob) in gold_ixs) - return (int(any(ix in gold_ixs for ix in np.array(choices_logprob).argsort()[::-1][: self.recall_depth])),) + return int(np.argmax(choices_logprobs) in gold_ixs) + return int(any(ix in gold_ixs for ix in np.array(choices_logprobs).argsort()[::-1][: self.recall_depth])) class MRR: @@ -409,7 +430,7 @@ def __init__(self, length_normalization: bool = False): """ self.length_normalization = length_normalization - def compute(self, choices_logprob: list[float], gold_ixs: list[float], formatted_doc: Doc, **kwargs) -> float: + def compute(self, model_response: ModelResponse, doc: Doc, **kwargs) -> float: """Mean reciprocal rank. Measures the quality of a ranking of choices (ordered by correctness). Args: @@ -421,13 +442,20 @@ def compute(self, choices_logprob: list[float], gold_ixs: list[float], formatted Returns: float: MRR score. 
""" + choices_logprobs = model_response.logprobs + gold_ixs = as_list(doc.gold_index) + choices_logprobs = model_response.logprobs + if self.length_normalization: - choices_logprob = [choices_logprob[ix] / len(formatted_doc.choices[ix]) for ix in len(choices_logprob)] - ranked_choices = [sorted(choices_logprob, reverse=True).index(choices_logprob[gold]) for gold in gold_ixs] + choices_logprobs = [ + choice_logprob / len(choice) for choice_logprob, choice in zip(choices_logprobs, doc.choices) + ] + + ranked_choices = [sorted(choices_logprobs, reverse=True).index(choices_logprobs[gold]) for gold in gold_ixs] return 1.0 / (min(ranked_choices) + 1) -def acc_golds_likelihood(argmax_logits_eq_gold_list: list[int], **kwargs) -> int: +def acc_golds_likelihood(doc, model_response, **kwargs) -> int: """Tests if at least one of predicted gold targets' argmax of logits equals the gold. Args: @@ -436,7 +464,7 @@ def acc_golds_likelihood(argmax_logits_eq_gold_list: list[int], **kwargs) -> int Returns: int: 1 if at least one of the possible golds has argmax of logits == gold, 0 otherwise """ - return int(any(argmax_logits_eq_gold_list)) + return int(any(model_response.argmax_logits_eq_gold)) class ROUGE: @@ -447,9 +475,9 @@ def __init__( methods: str | list[str], multiple_golds: bool = False, bootstrap: bool = False, - normalize_gold: callable = None, - normalize_pred: callable = None, - aggregation_function: callable = None, + normalize_gold: Callable | None = None, + normalize_pred: Callable | None = None, + aggregation_function: Callable | None = None, tokenizer: object = None, ): """A ROUGE wrapper method. Relies on `rouge_scorer`. @@ -486,7 +514,7 @@ def __init__( self.tokenizer = tokenizer self.scorer = None - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float | dict: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> float | dict: """Computes the metric(s) over a list of golds and predictions for one single sample. Args: @@ -499,6 +527,9 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float | """ from rouge_score import rouge_scorer + golds = doc.get_golds() + predictions = model_response.text + if self.scorer is None: self.scorer = rouge_scorer.RougeScorer(self.methods, tokenizer=self.tokenizer) @@ -537,11 +568,11 @@ def _rouge_score_multi_golds(self, golds: list[str], preds: list[str]): scores[method].append(cur_scores[method].fmeasure) return {method: self.aggregation_function(scores[method]) for method in self.methods} - def _rouge_score_with_bootsrap(self, golds: list[str], preds: list[str]): + def _rouge_score_with_bootsrap(self, golds: list[str], predictions: list[str]): from rouge_score import scoring aggregator = scoring.BootstrapAggregator() - for g, p in zip(golds, preds): + for g, p in zip(golds, predictions): aggregator.add_scores(self.scorer.score(g, p)) result = aggregator.aggregate() return {method: result[method].mid.fmeasure * 100 for method in self.methods} @@ -550,8 +581,8 @@ def _rouge_score_with_bootsrap(self, golds: list[str], preds: list[str]): class BertScore: def __init__( self, - normalize_gold: callable = None, - normalize_pred: callable = None, + normalize_gold: Callable | None = None, + normalize_pred: Callable | None = None, ): r"""A BERT scorer class. Relies on some called extracted from `bert-score`. By default, will use the `microsoft/deberta-large-mnli` as scorer. 
For each tokenized (pred, target) pair, it computes Precision, @@ -577,7 +608,7 @@ def __init__( self.normalize_gold = normalize_gold self.normalize_pred = normalize_pred - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> dict[str, float]: """Computes the prediction, recall and f1 score using the bert scorer. Args: @@ -587,6 +618,9 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict: Returns: dict: Scores over the current sample's items. """ + golds = doc.get_golds() + predictions = model_response.text + if self.bert_scorer is None: logger.warning("The first metric computation step might be a bit longer as we need to download the model.") # We only initialize on first compute @@ -628,7 +662,7 @@ def __init__( self.normalize_pred = normalize_pred self.input_column = input_column - def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> dict[str, float]: """ Compute the extractiveness of the predictions. @@ -645,8 +679,8 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[ if self.stats_metric is None: self.stats_metric = DataStatsMetric() - inp = formatted_doc.specific[self.input_column] - prediction = predictions[0] + inp = doc.specific[self.input_column] + prediction = model_response.text[0] if self.normalize_input: inp = self.normalize_input(inp) if self.normalize_pred: @@ -663,8 +697,8 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[ class Faithfulness: def __init__( self, - normalize_input: callable = remove_braces, - normalize_pred: callable = remove_braces_and_strip, + normalize_input: Callable = remove_braces, + normalize_pred: Callable = remove_braces_and_strip, input_column: str = "text", ): """ @@ -682,7 +716,7 @@ def __init__( self.normalize_pred = normalize_pred self.input_column = input_column - def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> dict[str, float]: """ Compute the faithfulness of the predictions. @@ -699,7 +733,8 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[ self.summac = SummaCZS( granularity="sentence", model_name="vitc", imager_load_cache=False ) # , device=device) - inp = formatted_doc.specific[self.input_column] + inp = doc.specific[self.input_column] + predictions = model_response.text prediction = predictions[0] if self.normalize_input: inp = self.normalize_input(inp) @@ -729,7 +764,7 @@ def model(self): self._model.eval() return self._model - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> float: """Uses the stored BLEURT scorer to compute the score on the current sample. Args: @@ -739,6 +774,8 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> float: Returns: float: Score over the current sample's items. 
""" + predictions = model_response.text + golds = doc.get_golds() if len(predictions) == 1: predictions = predictions * len(golds) scores = self.model(**self.tokenizer(golds, predictions, return_tensors="pt"))[0].squeeze() @@ -755,7 +792,7 @@ def __init__(self, n_gram: int): """ self.n_gram = n_gram - def compute(self, golds: list[str], predictions: list[str], **kwargs): + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs): """Computes the sentence level BLEU between the golds and each prediction, then takes the average. Args: @@ -765,9 +802,11 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs): Returns: float: Score over the current sample's items. """ + golds = doc.get_golds() + predictions = model_response.text return np.mean([self._bleu_score(golds, p) for p in predictions]) - def _bleu_score(self, gold: list[str], pred: str) -> float: + def _bleu_score(self, gold: list[str], pred: str): """Computes the BLEU score between a list of golds and the current prediction. Args: @@ -803,7 +842,7 @@ def __init__( self.strip_prediction = strip_prediction self.sample_aggregations = {"longest_common_prefix_length": max, "edit_distance": min, "edit_similarity": max} - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs): """Computes all the requested metrics on the golds and prediction. Args: @@ -813,6 +852,8 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict: Returns: dict: The different scores computed """ + predictions = model_response.text + golds = doc.get_golds() if len(golds) > 1: logger.warning( "Provided more than one gold to compute a string distance metric. Just using the first one." @@ -823,6 +864,8 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict: for sequence in predictions: if self.strip_prediction: completion = sequence.strip() + else: + completion = sequence # `reference` is the entire remaining book for each instance. # Truncate it here to be of the same length as the completion to ensure edit-distance is meaningful. @@ -878,7 +921,7 @@ def __init__( process_judge_response: Callable, judge_backend: Literal["litellm", "openai", "transformers", "vllm", "tgi", "inference-providers"], short_judge_name: str | None = None, - response_format: BaseModel = None, + response_format: BaseModel | None = None, url: str | None = None, hf_provider: str | None = None, max_tokens: int | None = None, @@ -930,7 +973,7 @@ def __init__( max_tokens=max_tokens, ) - def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]: + def compute(self, responses: list[ModelResponse], docs: list[Doc], **kwargs) -> list: raise NotImplementedError("This method should be implemented in the subclass.") @@ -944,22 +987,22 @@ def __init__(self): short_judge_name="gpt4o", ) - def compute(self, sample_ids: list[str], responses: list, formatted_docs: list[Doc], **kwargs) -> dict[str, float]: + def compute(self, responses: list[ModelResponse], docs: list[Doc], **kwargs) -> list: """ Compute the score of a generative task using a llm as a judge. The generative task can be multiturn with 2 turns max, in that case, we return scores for turn 1 and 2. Also returns user_prompt and judgement which are ignored later by the aggregator. 
""" - questions = [formatted_doc.query for formatted_doc in formatted_docs] - options = [formatted_doc.choices for formatted_doc in formatted_docs] - golds = [formatted_doc.get_golds()[0] for formatted_doc in formatted_docs] - predictions = [response[0].result[0] for response in responses] + questions = [formatted_doc.query for formatted_doc in docs] + options = [formatted_doc.choices for formatted_doc in docs] + golds = [formatted_doc.get_golds()[0] for formatted_doc in docs] + predictions = [response.text[0] for response in responses] scores, messages, judgements = self.judge.evaluate_answer_batch(questions, predictions, options, golds) metrics = [] - for i in range(len(sample_ids)): + for i in range(len(docs)): metrics.append( { "simpleqa_judge": scores[i], @@ -972,7 +1015,7 @@ def compute(self, sample_ids: list[str], responses: list, formatted_docs: list[D class JudgeLLMMTBench(JudgeLLM): - def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs): + def compute(self, model_response: list[ModelResponse], docs: list[Doc], **kwargs): """ Compute the score of a generative task using a llm as a judge. The generative task can be multiturn with 2 turns max, in that case, we @@ -982,8 +1025,9 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs): import json # If we are evaluating a multiturn task, we need to have specific field in the formatted doc - questions = formatted_doc.specific["multi_turn_queries"] - golds = formatted_doc.specific.get("reference", None) + questions = [doc.specific["multi_turn_queries"] for doc in docs] + golds = [doc.specific.get("reference", None) for doc in docs] + predictions = [response.text[0] for response in model_response] query_context_1 = {"query": questions[0], "context": ""} query_context_2 = {"query": questions[1], "context": predictions[0]} @@ -1004,22 +1048,22 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs): class JudgeLLMMixEval(JudgeLLM): - def compute(self, sample_ids: list[str], responses: list, formatted_docs: list[Doc], **kwargs) -> dict[str, float]: + def compute(self, model_responses: list[ModelResponse], docs: list[Doc], **kwargs): """ Compute the score of a generative task using a llm as a judge. The generative task can be multiturn with 2 turns max, in that case, we return scores for turn 1 and 2. Also returns user_prompt and judgement which are ignored later by the aggregator. 
""" - questions = [formatted_doc.specific["question"] for formatted_doc in formatted_docs] - options = [formatted_doc.choices for formatted_doc in formatted_docs] - golds = [formatted_doc.get_golds()[0] for formatted_doc in formatted_docs] - predictions = [response[0].result[0] for response in responses] + questions = [doc.specific["question"] for doc in docs] + options = [doc.choices for doc in docs] + golds = [doc.get_golds()[0] for doc in docs] + predictions = [response.text[0] for response in model_responses] scores, messages, judgements = self.judge.evaluate_answer_batch(questions, predictions, options, golds) metrics = [] - for i in range(len(sample_ids)): + for i in range(len(docs)): metrics.append( { f"judge_score_{self.short_judge_name}": scores[i], @@ -1035,8 +1079,8 @@ class MajAtK: def __init__( self, k: int, - normalize_gold: callable = None, - normalize_pred: callable = None, + normalize_gold: Callable | None = None, + normalize_pred: Callable | None = None, strip_strings: bool = False, type_exact_match: str = "full", ): @@ -1066,7 +1110,7 @@ def __init__( ) self.type_exact_match = type_exact_match - def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict[str, float]: + def compute(self, model_response: ModelResponse, docs: Doc, **kwargs): """Computes the metric over a list of golds and predictions for one single sample. It applies normalisation (if needed) to model prediction and gold, and takes the most frequent answer of all the available ones, then compares it to the gold. @@ -1078,6 +1122,8 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict[st Returns: float: Aggregated score over the current sample's items. """ + golds = docs.get_golds() + predictions = model_response.text if len(golds) > 1: raise Exception("Cannot compute maj@k with several golds") @@ -1088,7 +1134,7 @@ def compute(self, golds: list[str], predictions: list[str], **kwargs) -> dict[st majority_prediction = max(all_answers, key=all_answers.count) return self.compute_score(majority_prediction, gold) - def get_processed_gold(self, gold: str) -> float: + def get_processed_gold(self, gold: str) -> str: if self.strip_strings: gold = gold.strip() @@ -1097,7 +1143,7 @@ def get_processed_gold(self, gold: str) -> float: return gold - def get_processed_pred(self, pred: str) -> float: + def get_processed_pred(self, pred: str) -> str: if not pred: return "" @@ -1121,11 +1167,11 @@ class PassAtK: def __init__( self, k: int, - n: int = None, - normalize_gold: Callable = None, - normalize_pred: Callable = None, + n: int | None = None, + normalize_gold: Callable | None = None, + normalize_pred: Callable | None = None, strip_strings: bool = False, - sample_scoring_function: Union[Callable[[str, str], float], str] = None, + sample_scoring_function: Callable[[Doc, ModelResponse], float] | str | None = None, ): """Computing pass at k @@ -1165,9 +1211,7 @@ def __init__( self.type_exact_match = "full" self.score_sample = self.default_sample_scoring - def compute( - self, golds: list[str], predictions: list[str], formatted_doc: Doc = None, **kwargs - ) -> dict[str, float]: + def compute(self, doc: Doc, model_response: ModelResponse, **kwargs) -> float: """Computes the metric over a list of golds and predictions for one single item with possibly many samples. It applies normalisation (if needed) to model prediction and gold, computes their per prediction score, then aggregates the scores over the samples using a pass@k. 
@@ -1179,6 +1223,8 @@ def compute( Returns: float: Aggregated score over the current sample's items. """ + golds = doc.get_golds() + predictions = model_response.text if len(golds) > 1: raise Exception("Cannot compute pass@k with several golds") @@ -1188,16 +1234,24 @@ def compute( elif len(predictions) < self.n: logger.warning(f"Number of predictions is less than {self.n} for pass@k.") - gold = self.get_processed_gold(golds[0]) + processed_choices = [self.get_processed_gold(gold=g) for g in doc.choices] + new_doc = Doc( + choices=processed_choices, + query=doc.query, + gold_index=doc.gold_index, + ) all_scores = [] for pred in predictions[: self.n]: cur_pred = self.get_processed_pred(pred=pred) - all_scores.append(self.score_sample(cur_pred, gold, formatted_doc)) + new_model_response = ModelResponse( + text=[cur_pred], + ) + all_scores.append(self.score_sample(new_doc, new_model_response)) return self.pass_at_k(all_scores) - def get_processed_gold(self, gold: str) -> float: + def get_processed_gold(self, gold: str) -> str: if self.strip_strings: gold = gold.strip() @@ -1206,7 +1260,7 @@ def get_processed_gold(self, gold: str) -> float: return gold - def get_processed_pred(self, pred: str) -> float: + def get_processed_pred(self, pred: str) -> str: if not pred: return "" @@ -1218,7 +1272,10 @@ def get_processed_pred(self, pred: str) -> float: return pred - def default_sample_scoring(self, pred: str, gold: str) -> int: + def default_sample_scoring(self, doc, model_response) -> int: + pred = model_response.text[0] + gold = doc.get_golds()[0] + if self.type_exact_match == "prefix": return 1 if pred.startswith(gold) else 0 if self.type_exact_match == "suffix": @@ -1238,12 +1295,12 @@ class GPassAtK: def __init__( self, k: Union[int, list[int]], - n: int = None, + n: int | None = None, thresholds: list[float] = [0.0, 0.25, 0.5, 0.75, 1.0], - normalize_gold: Callable = None, - normalize_pred: Callable = None, + normalize_gold: Callable | None = None, + normalize_pred: Callable | None = None, strip_strings: bool = False, - sample_scoring_function: Union[Callable[[str, str], float], str] = None, + sample_scoring_function: Callable[[Doc, ModelResponse], float] | str | None = None, ): """Computing G-Pass@k from http://arxiv.org/abs/2412.13147 @@ -1285,7 +1342,7 @@ def __init__( self.type_exact_match = "full" self.score_sample = self.default_sample_scoring - def compute(self, predictions: list[str], formatted_doc: list[Doc], **kwargs) -> dict[str, float]: + def compute(self, model_response: ModelResponse, doc: Doc, **kwargs) -> float: """Computes the metric over a list of golds and predictions for one single item with possibly many samples. It applies normalisation (if needed) to model prediction and gold, computes their per prediction score, then aggregates the scores over the samples using a pass@k. @@ -1297,7 +1354,8 @@ def compute(self, predictions: list[str], formatted_doc: list[Doc], **kwargs) -> Returns: float: Aggregated score over the current sample's items. 
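# The per-prediction scores gathered above are reduced by `self.pass_at_k`.
# Sketch of the standard unbiased estimator (Chen et al., 2021) that pass@k
# implementations typically use; treat this exact body as an assumption here.
import numpy as np

def pass_at_k_estimate(n: int, c: int, k: int) -> float:
    """n generations, c of them correct, evaluated at k."""
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# pass_at_k_estimate(n=10, c=3, k=5) -> ~0.917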
""" - golds = formatted_doc.get_golds() + golds = doc.get_golds() + predictions = model_response.text if len(golds) > 1: raise Exception("Cannot compute G-Pass@k with several golds") @@ -1310,12 +1368,20 @@ def compute(self, predictions: list[str], formatted_doc: list[Doc], **kwargs) -> elif len(predictions) < self.n: logger.warning(f"Number of predictions is less than {self.n} for G-Pass@k.") - gold = self.get_processed_gold(golds[0]) + processed_choices = [self.get_processed_gold(gold=g) for g in doc.choices] + new_doc = Doc( + choices=processed_choices, + query=doc.query, + gold_index=doc.gold_index, + ) all_scores = [] for pred in predictions[: self.n]: cur_pred = self.get_processed_pred(pred=pred) - all_scores.append(self.score_sample(cur_pred, gold, formatted_doc)) + new_model_response = ModelResponse( + text=[cur_pred], + ) + all_scores.append(self.score_sample(new_doc, new_model_response)) return self.g_pass_at_k(all_scores) @@ -1340,7 +1406,9 @@ def get_processed_pred(self, pred: str) -> str: return pred - def default_sample_scoring(self, pred: str, gold: str) -> int: + def default_sample_scoring(self, doc: Doc, model_response: ModelResponse) -> int: + gold = doc.get_golds()[0] + pred = model_response.text[0] if self.type_exact_match == "prefix": return 1 if pred.startswith(gold) else 0 if self.type_exact_match == "suffix": diff --git a/src/lighteval/metrics/normalizations.py b/src/lighteval/metrics/normalizations.py index 6c4ebfb66..565a5e379 100644 --- a/src/lighteval/metrics/normalizations.py +++ b/src/lighteval/metrics/normalizations.py @@ -468,6 +468,8 @@ def normalize_log_probs( match normalization: case LogProbCharNorm(ignore_first_space=True): assert choices_text is not None, "choices_text must be provided for character normalization" + if len(choices_text) != len(choices_logprob): + raise ValueError("choices_text and choices_logprob must have the same length") normalized_log_probs = [ choices_logprob[ix] / (len(choice) - 1 if choice[0] == " " else len(choice)) for ix, choice in enumerate(choices_text) diff --git a/src/lighteval/metrics/sample_preparator.py b/src/lighteval/metrics/sample_preparator.py index 4fafa509b..830326fc2 100644 --- a/src/lighteval/metrics/sample_preparator.py +++ b/src/lighteval/metrics/sample_preparator.py @@ -26,6 +26,10 @@ import numpy as np +from lighteval.models.model_output import ModelResponse +from lighteval.tasks.requests import Doc +from lighteval.utils.utils import as_list + logger = logging.getLogger(__name__) @@ -58,7 +62,7 @@ class PerplexityCorpusMetricInput(CorpusMetricInput): class GenerativePreparator: @staticmethod - def prepare(golds: list[str], predictions: list[str], **kwargs): + def prepare(doc: Doc, model_response: ModelResponse, **kwargs): """Prepares an individual generative example to the format expected by metrics computed at the corpus level (aggregated). 
Args: @@ -68,6 +72,8 @@ def prepare(golds: list[str], predictions: list[str], **kwargs): Returns: GenerativeCorpusMetricInput: Stores the golds and predictions as such """ + golds = as_list(doc.get_golds()) + predictions = model_response.text return GenerativeCorpusMetricInput(golds=golds, preds=predictions) @@ -81,7 +87,7 @@ def __init__(self, is_single_token: bool = False): """ self.is_single_token = is_single_token - def prepare(self, gold_ixs: list[int], choices_logprob: list[float], **kwargs) -> LogprobCorpusMetricInput: + def prepare(self, doc: Doc, model_response: ModelResponse, **kwargs) -> LogprobCorpusMetricInput: """Prepares an individual loglikelihood example to the format expected by metrics computed at the corpus level (aggregated). Args: @@ -92,6 +98,8 @@ def prepare(self, gold_ixs: list[int], choices_logprob: list[float], **kwargs) - LogprobCorpusMetricInput: Stores the golds indices and the model's choice (choice with the highest logprob) Only the first gold index is taken for a single token loglikelihood metric """ + gold_ixs = as_list(doc.gold_index) + choices_logprob = model_response.logprobs if self.is_single_token: if len(gold_ixs) > 1: logger.warning( @@ -102,6 +110,51 @@ def prepare(self, gold_ixs: list[int], choices_logprob: list[float], **kwargs) - return LogprobCorpusMetricInput(golds=gold_ixs, preds=np.argmax(choices_logprob)) +class TargetPerplexityPreparator: + def __init__(self, units_type: str) -> None: + """Init. + + Args: + units_type (str): Basic type of text units we want to use to weight perplexity computations. + Can be `words` or `bytes` + + Raises: + ValueError: If the unit type is not words or byte, raises a ValueError + """ + if units_type not in ["words", "bytes"]: + raise ValueError("Perplexity must be computed at either the word or byte level.") + self.units_type = units_type + + def count_units(self, text: str) -> int: + """Counts the given number of unit in the input text. + + Args: + text (str): Input text + + Returns: + int: Number of units of type `self.units_type` in the input text. + """ + if self.units_type == "words": + return len(re.split(r"\s+", text)) + if self.units_type == "bytes": + return len(text.encode("utf-8")) + + def prepare(self, doc: Doc, model_response: ModelResponse, **kwargs): + """Prepares an individual perplexity example to the format expected by metrics computed at the corpus level (aggregated). + + Args: + logprobs (list[float]): List of the log-probabilities computed for each item of the sequence or single aggregated logprob over the sequence + reference_text (str): Current reference text for which to compute the length in self.units_type + + Returns: + PerplexityCorpusMetricInput: Stores the measured logprobs and associated text lengths, counted in the reference unit. + """ + + logprobs_flat = np.sum(model_response.logprobs) + reference_text_flat = " ".join(doc.get_golds()) + return PerplexityCorpusMetricInput(logprobs=logprobs_flat, weights=self.count_units(reference_text_flat)) + + class PerplexityPreparator: def __init__(self, units_type: str) -> None: """Init. @@ -131,7 +184,7 @@ def count_units(self, text: str) -> int: if self.units_type == "bytes": return len(text.encode("utf-8")) - def prepare(self, logprobs: list[float], reference_texts: list[str], **kwargs): + def prepare(self, doc: Doc, model_response: ModelResponse, **kwargs): """Prepares an individual perplexity example to the format expected by metrics computed at the corpus level (aggregated). 
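# Quick illustration of the unit counting above: "words" splits on whitespace,
# "bytes" measures the UTF-8 encoded length of the reference text.
TargetPerplexityPreparator(units_type="words").count_units("le chat dort")  # -> 3
TargetPerplexityPreparator(units_type="bytes").count_units("café")          # -> 5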
Args: @@ -142,6 +195,11 @@ def prepare(self, logprobs: list[float], reference_texts: list[str], **kwargs): PerplexityCorpusMetricInput: Stores the measured logprobs and associated text lengths, counted in the reference unit. """ - logprobs_flat = np.sum(logprobs) - reference_text_flat = " ".join(reference_texts) + logprobs_flat = np.sum(model_response.logprobs) + + if doc.original_query is not None: + reference_text_flat = " ".join([doc.original_query]) + else: + reference_text_flat = " ".join([doc.query]) + return PerplexityCorpusMetricInput(logprobs=logprobs_flat, weights=self.count_units(reference_text_flat)) diff --git a/src/lighteval/metrics/utils/metric_utils.py b/src/lighteval/metrics/utils/metric_utils.py index cb9f5e744..78d30c59a 100644 --- a/src/lighteval/metrics/utils/metric_utils.py +++ b/src/lighteval/metrics/utils/metric_utils.py @@ -21,46 +21,20 @@ # SOFTWARE. from dataclasses import dataclass -from enum import Enum, auto - - -class MetricCategory(str, Enum): - TARGET_PERPLEXITY = auto() - PERPLEXITY = auto() - GENERATIVE = auto() - GENERATIVE_LOGPROB = auto() - GENERATIVE_SAMPLING = auto() - LLM_AS_JUDGE_MULTI_TURN = auto() - LLM_AS_JUDGE = auto() - MULTICHOICE = auto() - MULTICHOICE_PMI = auto() - MULTICHOICE_ONE_TOKEN = auto() - IGNORED = auto() - - -class MetricUseCase(str, Enum): - # General - ACCURACY = auto() - PERPLEXITY = auto() - # Task specific - CODE = auto() - COPYRIGHT = auto() - MATH = auto() - REASONING = auto() - SOCIAL_IMPACTS = auto() - SUMMARIZATION = auto() - TRANSLATION = auto() - NONE = auto() +from typing import Callable + +from lighteval.tasks.requests import SamplingMethod @dataclass class Metric: metric_name: str higher_is_better: bool - category: MetricCategory - use_case: MetricUseCase - sample_level_fn: callable - corpus_level_fn: callable + category: SamplingMethod + sample_level_fn: Callable + corpus_level_fn: Callable + + batched_compute: bool = False def get_doc(self): return self.sample_level_fn.__doc__ @@ -68,8 +42,6 @@ def get_doc(self): def compute( self, **kwargs ) -> dict: # result: Union[list[ModelResponse], ModelResponse], formatted_doc: Doc) -> dict: - if self.category == MetricCategory.IGNORED: - return {} if isinstance(self, MetricGrouping): return self.sample_level_fn(**kwargs) # result, formatted_doc, return {self.metric_name: self.sample_level_fn(**kwargs)} # result, formatted_doc, @@ -82,8 +54,8 @@ class MetricGrouping(Metric): """ metric_name: list[str] - corpus_level_fn: dict[str:callable] - higher_is_better: dict[str:callable] + corpus_level_fn: dict[str, Callable] + higher_is_better: dict[str, Callable] @dataclass diff --git a/src/lighteval/models/abstract_model.py b/src/lighteval/models/abstract_model.py index 78e1768f1..6152c75cf 100644 --- a/src/lighteval/models/abstract_model.py +++ b/src/lighteval/models/abstract_model.py @@ -25,22 +25,10 @@ from typing import Optional, Union import torch -from transformers import BatchEncoding, PreTrainedTokenizerBase - -from lighteval.models.model_output import ( - GenerativeMultiturnResponse, - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) -from lighteval.tasks.requests import ( - GreedyUntilMultiTurnRequest, - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, - RequestType, -) +from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase + +from lighteval.models.model_output import ModelResponse +from lighteval.tasks.requests import Doc TokenSequence = 
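# With MetricCategory/MetricUseCase removed, a metric now declares the sampling
# method it needs directly. Sketch assuming the Metric fields listed above and a
# GENERATIVE member on SamplingMethod; the name and scoring lambda are illustrative.
import numpy as np
from lighteval.tasks.requests import SamplingMethod

illustrative_metric = Metric(
    metric_name="illustrative_exact_match",
    higher_is_better=True,
    category=SamplingMethod.GENERATIVE,
    sample_level_fn=lambda doc, model_response, **kwargs: float(
        model_response.text[0] == doc.get_golds()[0]
    ),
    corpus_level_fn=np.mean,
    batched_compute=False,
)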
Union[list[int], torch.LongTensor, torch.Tensor, BatchEncoding] @@ -49,9 +37,9 @@ @dataclass class ModelInfo: model_name: str - model_sha: Optional[str] = None - model_dtype: Optional[str] = None - model_size: Optional[str] = None + model_sha: str | None = None + model_dtype: str | None = None + model_size: int | None = None class LightevalModel(ABC): @@ -84,37 +72,16 @@ def max_length(self) -> int: def disable_tqdm(self) -> bool: return False - def get_method_from_request_type(self, request_type: RequestType): - if request_type == RequestType.LOGLIKELIHOOD: - return self.loglikelihood - if request_type == RequestType.LOGLIKELIHOOD_SINGLE_TOKEN: - return self.loglikelihood_single_token - if request_type == RequestType.LOGLIKELIHOOD_ROLLING: - return self.loglikelihood_rolling - if request_type == RequestType.GREEDY_UNTIL: - return self.greedy_until - if request_type == RequestType.GREEDY_UNTIL_MULTI_TURN: - return self.greedy_until_multi_turn - raise NotImplementedError(f"Request type {request_type} not supported") - - def greedy_until_multi_turn( # noqa: C901 - self, requests: list[GreedyUntilMultiTurnRequest] - ) -> GenerativeMultiturnResponse: - """Generates responses using a greedy decoding strategy until certain ending conditions are met.""" - return NotImplemented - @abstractmethod def greedy_until( self, - requests: list[GreedyUntilRequest], - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. Args: - requests (list[Request]): list of requests containing the context and ending conditions. - disable_tqdm (bool, optional): Whether to disable the progress bar. Defaults to False. - override_bs (int, optional): Override the batch size for generation. Defaults to None. + docs (list[Doc]): List of documents containing the context for generation. Returns: list[GenerativeResponse]: list of generated responses. @@ -122,26 +89,17 @@ def greedy_until( return NotImplemented @abstractmethod - def loglikelihood(self, requests: list[LoglikelihoodRequest]) -> list[LoglikelihoodResponse]: + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: """Tokenize the context and continuation and compute the log likelihood of those tokenized sequences. """ return NotImplemented @abstractmethod - def loglikelihood_rolling(self, requests: list[LoglikelihoodRollingRequest]) -> list[LoglikelihoodResponse]: + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: """This function is used to compute the log likelihood of the context for perplexity metrics.""" return NotImplemented - @abstractmethod - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest] - ) -> list[LoglikelihoodSingleTokenResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. - """ - return NotImplemented - # Tokenization utils def tok_encode(self, str_to_encode: str | list[str], add_special_tokens: Optional[bool] = None) -> TokenSequence: if add_special_tokens is None: @@ -155,54 +113,57 @@ def tok_encode(self, str_to_encode: str | list[str], add_special_tokens: Optiona return_tensors="pt", ) - def tok_encode_pair(self, context, continuation, pairwise: bool = False): - """Encodes a context, continuation pair by taking care of the spaces in between. 
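# Sketch of driving the slimmed-down abstract interface above end to end; `model`
# stands in for any LightevalModel implementation and the doc contents are illustrative.
docs = [Doc(query="2 + 2 =", choices=[" 4"], gold_index=0)]
generations = model.greedy_until(docs)       # -> list[ModelResponse], one per Doc
scored = model.loglikelihood(docs)           # -> list[ModelResponse] with per-choice logprobs
rolling = model.loglikelihood_rolling(docs)  # -> list[ModelResponse] for perplexity metrics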
+ def tok_encode_pair(self, context, continuations: list[str], pairwise: bool = False): + """Encodes a context with a list of continuations by taking care of the spaces in between. Args: context (str): The context string to be encoded. - continuation (str): The continuation string to be encoded. + continuation (list[str]): List of continuation strings to be encoded. pairwise (bool): - If True, encode context and continuation separately. + If True, encode context and continuations separately. If False, encode them together and then split. Returns: - Tuple[TokenSequence, TokenSequence]: A tuple containing the encoded context and continuation. + Tuple[TokenSequence, list[TokenSequence]]: + A tuple containing the encoded context and a list of encoded continuations. The advantage of pairwise is: 1) It better aligns with how LLM predicts tokens 2) Works in case len(tok(context,cont)) != len(tok(context)) + len(tok(continuation)). E.g this can happen for chinese if no space is used between context/continuation """ - n_spaces = len(context) - len(context.rstrip()) if n_spaces > 0: - continuation = context[-n_spaces:] + continuation + continuations = [context[-n_spaces:] + cont for cont in continuations] context = context[:-n_spaces] if pairwise: # We don't add special tokens to the continuation as if bos is added # models tend to to completely ignore a context - context_enc, continuation_enc = ( - self.tok_encode(context, add_special_tokens=self.add_special_tokens), - self.tok_encode(continuation, add_special_tokens=False), - ) + context_enc = self.tok_encode(context, add_special_tokens=self.add_special_tokens) + continuation_enc = [self.tok_encode(cont, add_special_tokens=False) for cont in continuations] # In theory the context_enc can be ended with eos token, this would again # cause the model to ignore the context. We thus strip the eos token from context_enc if len(context_enc) > 0 and context_enc[-1] == self.tokenizer.eos_token_id: context_enc = context_enc[:-1] - return context_enc, continuation_enc + context_encs = [context_enc] * len(continuation_enc) + + return context_encs, continuation_enc - whole_enc = self.tok_encode(context + continuation) + # Handle list of continuations context_enc = self.tok_encode(context) - context_enc_len = len(context_enc) - # In case continuation tokens merge with context tokens we use the merged token as continuation - if len(context_enc) == len(whole_enc): - context_enc_len = len(context_enc) - 1 - context_enc = whole_enc[:context_enc_len] - - continuation_enc = whole_enc[context_enc_len:] - return context_enc, continuation_enc + context_encs = [] + continuations_encs = [] + for cont in continuations: + whole_enc = self.tok_encode(context + cont) + context_enc_len = len(context_enc) + if len(context_enc) == len(whole_enc): + context_enc_len = len(context_enc) - 1 + continuations_encs.append(whole_enc[context_enc_len:]) + context_encs.append(whole_enc[:context_enc_len]) + + return context_encs, continuations_encs def tok_decode(self, tokens: torch.LongTensor) -> list[str]: return self.tokenizer.batch_decode(tokens, skip_special_tokens=True) diff --git a/src/lighteval/models/custom/custom_model.py b/src/lighteval/models/custom/custom_model.py index 1c75db00e..480952255 100644 --- a/src/lighteval/models/custom/custom_model.py +++ b/src/lighteval/models/custom/custom_model.py @@ -59,18 +59,15 @@ def __init__(self, config, env_config): super().__init__(config, env_config) # Custom initialization... 
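# Usage sketch for the list-based tok_encode_pair above: one context, several
# candidate continuations, encoded pairwise. `model` is any LightevalModel with a
# tokenizer; return shapes follow the updated docstring.
context = "The capital of France is"
continuations = [" Paris", " Lyon"]
context_encs, continuation_encs = model.tok_encode_pair(context, continuations, pairwise=True)
# len(context_encs) == len(continuation_encs) == 2: the encoded context is repeated
# once per continuation.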
- def greedy_until(self, *args, **kwargs): + def greedy_until(self, docs: list[Doc]) -> list[ModelResponse]: # Custom generation logic... pass + + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: + pass ``` An example of a custom model can be found in `examples/custom_models/google_translate_model.py`. - - Notes: - - The custom model class must inherit from LightevalModel and implement all required methods - - Only one class inheriting from LightevalModel should be defined in the file - - The model file is dynamically loaded at runtime, so ensure all dependencies are available - - Exercise caution when loading custom model files as they can execute arbitrary code """ model_name: str diff --git a/src/lighteval/models/dummy/dummy_model.py b/src/lighteval/models/dummy/dummy_model.py index 766d89bb9..a73e239cf 100644 --- a/src/lighteval/models/dummy/dummy_model.py +++ b/src/lighteval/models/dummy/dummy_model.py @@ -23,22 +23,36 @@ # inspired by https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/dummy.py import random -from typing import Optional -from pydantic import BaseModel -from transformers import AutoTokenizer +from transformers.models.auto.tokenization_auto import AutoTokenizer from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import GenerativeResponse, LoglikelihoodResponse, LoglikelihoodSingleTokenResponse -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) +from lighteval.models.model_output import ModelResponse +from lighteval.models.utils import ModelConfig +from lighteval.tasks.requests import Doc -class DummyModelConfig(BaseModel, extra="forbid"): +class DummyModelConfig(ModelConfig): + """ + Configuration class for dummy models used for testing and baselines. + + This configuration is used to create dummy models that generate random responses + or baselines for evaluation purposes. Useful for testing evaluation pipelines + without requiring actual model inference. + + Attributes: + seed (int): + Random seed for reproducible dummy responses. Defaults to 42. + This seed controls the randomness of the generated responses and log probabilities. 
+ + Example: + ```python + config = DummyModelConfig( + seed=123, + ) + ``` + """ + seed: int = 42 @@ -68,25 +82,29 @@ def add_special_tokens(self): def max_length(self) -> int: return 2048 - def greedy_until( - self, requests: list[GreedyUntilRequest], override_bs: Optional[int] = None - ) -> list[GenerativeResponse]: - return [GenerativeResponse(result="random baseline") for _ in range(len(requests))] - - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - return [LoglikelihoodResponse((-self._random.random(), False)) for _ in requests] - - def loglikelihood_rolling( - self, requests: list[LoglikelihoodRollingRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - return [LoglikelihoodResponse((-self._random.random(), False)) for _ in requests] - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodSingleTokenResponse]: - return [ - LoglikelihoodSingleTokenResponse(result=[-self._random.random() for _ in req.tokenized_continuation]) - for req in requests - ] + def greedy_until(self, docs: list[Doc]) -> list[ModelResponse]: + return [ModelResponse(text=["random baseline"]) for _ in range(len(docs))] + + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: + model_responses = [] + for doc in docs: + model_responses.append( + ModelResponse( + logprobs=[-self._random.random() for _ in doc.choices], + argmax_logits_eq_gold=[False for _ in doc.choices], + ) + ) + + return model_responses + + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: + model_responses = [] + for doc in docs: + model_responses.append( + ModelResponse( + logprobs=[-self._random.random() for _ in doc.choices], + argmax_logits_eq_gold=[False for _ in doc.choices], + ) + ) + + return model_responses diff --git a/src/lighteval/models/endpoints/endpoint_model.py b/src/lighteval/models/endpoints/endpoint_model.py index 8daa48ea1..49d45e961 100644 --- a/src/lighteval/models/endpoints/endpoint_model.py +++ b/src/lighteval/models/endpoints/endpoint_model.py @@ -33,29 +33,23 @@ InferenceClient, InferenceEndpoint, InferenceEndpointError, - InferenceEndpointTimeoutError, TextGenerationInputGrammarType, TextGenerationOutput, create_inference_endpoint, get_inference_endpoint, ) -from huggingface_hub.utils import HfHubHTTPError +from huggingface_hub.errors import HfHubHTTPError from requests import ConnectionError from torch.utils.data import DataLoader from tqdm import tqdm -from transformers import AutoTokenizer +from transformers.models.auto.tokenization_auto import AutoTokenizer from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import GenerativeResponse, LoglikelihoodResponse, LoglikelihoodSingleTokenResponse +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) -from lighteval.utils.utils import as_list +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc logger = logging.getLogger(__name__) @@ -75,11 +69,114 @@ class ServerlessEndpointModelConfig(ModelConfig): + """ + Configuration class for HuggingFace Inference API 
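# Illustration of the dummy loglikelihood output above for a two-choice doc:
# one random negative logprob per entry in doc.choices, greedy-match flags all False.
doc = Doc(query="Is the sky blue?", choices=[" Yes", " No"], gold_index=0)
# For this doc the dummy model would return something shaped like:
ModelResponse(
    logprobs=[-0.49, -0.77],               # one float per choice
    argmax_logits_eq_gold=[False, False],  # never claims a greedy match
)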
(inference endpoints). + + https://huggingface.co/inference-endpoints/dedicated + + Attributes: + model_name (str): + HuggingFace Hub model ID to use with the Inference API. + Example: "meta-llama/Llama-3.1-8B-Instruct" + add_special_tokens (bool): + Whether to add special tokens during tokenization. Defaults to True. + batch_size (int): + Batch size for requests. Defaults to 1 (serverless API limitation). + + Example: + ```python + config = ServerlessEndpointModelConfig( + model_name="meta-llama/Llama-3.1-8B-Instruct", + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + """ + model_name: str add_special_tokens: bool = True + batch_size: int = 1 class InferenceEndpointModelConfig(ModelConfig): + """ + Configuration class for HuggingFace Inference Endpoints (dedicated infrastructure). + + This configuration is used to create and manage dedicated inference endpoints + on HuggingFace's infrastructure. These endpoints provide dedicated compute + resources and can handle larger batch sizes and higher throughput. + + Attributes: + endpoint_name (str | None): + Name for the inference endpoint. If None, auto-generated from model_name. + model_name (str | None): + HuggingFace Hub model ID to deploy. Required if endpoint_name is None. + reuse_existing (bool): + Whether to reuse an existing endpoint with the same name. Defaults to False. + accelerator (str): + Type of accelerator to use. Defaults to "gpu". Options: "gpu", "cpu". + dtype (str | None): + Model data type. If None, uses model default. Options: "float16", "bfloat16", "awq", "gptq", "8bit", "4bit". + vendor (str): + Cloud vendor for the endpoint. Defaults to "aws". Options: "aws", "azure", "gcp". + region (str): + Cloud region for the endpoint. Defaults to "us-east-1". + instance_size (str | None): + Instance size for the endpoint. If None, auto-scaled. + instance_type (str | None): + Instance type for the endpoint. If None, auto-scaled. + framework (str): + ML framework to use. Defaults to "pytorch". + endpoint_type (str): + Type of endpoint. Defaults to "protected". Options: "protected", "public". + add_special_tokens (bool): + Whether to add special tokens during tokenization. Defaults to True. + revision (str): + Git revision of the model. Defaults to "main". + namespace (str | None): + Namespace for the endpoint. If None, uses current user's namespace. + image_url (str | None): + Custom Docker image URL. If None, uses default TGI image. + env_vars (dict | None): + Additional environment variables for the endpoint. + batch_size (int): + Batch size for requests. Defaults to 1. + + Methods: + model_post_init(): + Validates configuration and ensures proper parameter combinations. + get_dtype_args(): + Returns environment variables for dtype configuration. + get_custom_env_vars(): + Returns custom environment variables for the endpoint. 
+ + Example: + ```python + config = InferenceEndpointModelConfig( + model_name="microsoft/DialoGPT-medium", + instance_type="nvidia-a100", + instance_size="x1", + vendor="aws", + region="us-east-1", + dtype="float16", + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + + Note: + - Creates dedicated infrastructure for model inference + - Supports various quantization methods and hardware configurations + - Auto-scaling available for optimal resource utilization + - Requires HuggingFace Pro subscription for most features + - Endpoints can take several minutes to start up + - Billed based on compute usage and duration + """ + endpoint_name: str | None = None model_name: str | None = None reuse_existing: bool = False @@ -98,6 +195,7 @@ class InferenceEndpointModelConfig(ModelConfig): ) image_url: str | None = None env_vars: dict | None = None + batch_size: int = 1 def model_post_init(self, __context): # xor operator, one is None but not the other @@ -135,6 +233,7 @@ class InferenceEndpointModel(LightevalModel): def __init__( # noqa: C901 self, config: Union[InferenceEndpointModelConfig, ServerlessEndpointModelConfig] ) -> None: + self.config = config self.reuse_existing = getattr(config, "reuse_existing", False) self._max_length = None self.endpoint = None @@ -217,20 +316,14 @@ def __init__( # noqa: C901 logger.info("Trying to deploy your endpoint. Please wait for 10 min.") self.endpoint.wait(timeout=600, refresh_every=60) # We wait for 10 min except InferenceEndpointError as e: + logger.info( + f"Endpoint failed to start on current hardware with error {e}. Trying to autoscale to ({instance_type}, {instance_size})." + ) instance_type, instance_size = InferenceEndpointModel.get_larger_hardware_suggestion( instance_type, instance_size ) must_scaleup_endpoint = True - logger.info( - f"Endpoint failed to start on current hardware with error {e}. Trying to autoscale to ({instance_type}, {instance_size})." - ) - except InferenceEndpointTimeoutError as e: - logger.error( - "Endpoint did not start within 30 minutes, there was a timeout. Please inspect the logs." - ) - self.cleanup() - raise e except HfHubHTTPError as e: # The endpoint actually already exists, we'll spin it up instead of trying to create a new one if "409 Client Error: Conflict for url:" in str(e): @@ -271,6 +364,9 @@ def __init__( # noqa: C901 self._tokenizer = AutoTokenizer.from_pretrained(self.name) self._add_special_tokens = config.add_special_tokens if config.add_special_tokens is not None else False + self.prompt_manager = PromptManager( + use_chat_template=True, tokenizer=self.tokenizer, system_prompt=config.system_prompt + ) self.model_info = ModelInfo( model_name=self.name, model_sha=self.revision, @@ -327,16 +423,11 @@ def add_special_tokens(self): @property def disable_tqdm(self) -> bool: - False # no accelerator = this is the main process + return False # no accelerator = this is the main process def cleanup(self): if self.endpoint is not None: - if self.reuse_existing: - self.endpoint.pause() - logger.warning( - "Since your endpoint was existing before, we did not delete it, but paused it instead. You might want to delete it if you're done using it." - ) - else: + if not self.reuse_existing: self.endpoint.delete() logger.warning( "We deleted the spinned up endpoint after using it. You'll need to create it again if you need to reuse it." 
@@ -356,8 +447,8 @@ def max_length(self): def _async_process_request( self, context: str, - stop_tokens: list[str], - max_tokens: int, + stop_tokens: list[str] | None, + max_tokens: int | None, grammar: Optional[TextGenerationInputGrammarType] = None, ) -> Coroutine[None, list[TextGenerationOutput], str]: # Todo: add an option to launch with conversational instead for chat prompts @@ -375,9 +466,9 @@ def _async_process_request( def _process_request( self, context: str, - stop_tokens: list[str], - max_tokens: int, - grammar: Optional[TextGenerationInputGrammarType] = None, + stop_tokens: list[str] | None, + max_tokens: int | None, + grammar: TextGenerationInputGrammarType | None = None, ) -> TextGenerationOutput: # Todo: add an option to launch with conversational instead for chat prompts # https://huggingface.co/docs/huggingface_hub/v0.20.3/en/package_reference/inference_client#huggingface_hub.AsyncInferenceClient.conversational @@ -396,72 +487,65 @@ def _process_request( async def _async_process_batch_generate( self, - requests: list[GreedyUntilRequest], - ) -> list[TextGenerationOutput]: + docs: list[Doc], + ): return await asyncio.gather( *[ self._async_process_request( - context=request.context, - stop_tokens=as_list(request.stop_sequence), - max_tokens=request.generation_size, - grammar=request.generation_grammar, + context=self.prompt_manager.prepare_prompt(doc), + stop_tokens=doc.stop_sequences, + max_tokens=doc.generation_size, + grammar=doc.generation_grammar, ) - for request in requests + for doc in docs ] ) def _process_batch_generate( self, - requests: list[GreedyUntilRequest], + docs: list[Doc], ) -> list[TextGenerationOutput]: return [ self._process_request( - context=request.context, - stop_tokens=as_list(request.stop_sequence), - max_tokens=request.generation_size, - grammar=request.generation_grammar, + context=self.prompt_manager.prepare_prompt(doc), + stop_tokens=doc.stop_sequences, + max_tokens=doc.generation_size, + grammar=doc.generation_grammar, ) - for request in requests + for doc in docs ] - async def _async_process_batch_logprob( - self, requests: list[LoglikelihoodRequest], rolling: bool = False - ) -> list[TextGenerationOutput]: + async def _async_process_batch_logprob(self, docs: list[Doc], rolling: bool = False): + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in docs] return await asyncio.gather( *[ self._async_process_request( - context=request.context if rolling else request.context + request.choice, + context=context if rolling else context + doc.choices[0], stop_tokens=[], max_tokens=1, ) - for request in requests + for context, doc in zip(contexts, docs) ] ) - def _process_batch_logprob( - self, requests: list[LoglikelihoodRequest], rolling: bool = False - ) -> list[TextGenerationOutput]: + def _process_batch_logprob(self, docs: list[Doc], rolling: bool = False) -> list[TextGenerationOutput]: + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in docs] return [ self._process_request( - context=request.context if rolling else request.context + request.choice, + context=context if rolling else context + doc.choices[0], stop_tokens=[], max_tokens=1, ) - for request in requests + for context, doc in zip(contexts, docs) ] def greedy_until( self, - requests: List[GreedyUntilRequest], - override_bs: Optional[int] = None, - ) -> List[GenerativeResponse]: - for request in requests: - request.tokenized_context = self.tok_encode(request.context) - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - - 
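# Sketch of the payload assembly shared by the logprob helpers above; the helper
# name is illustrative, the logic mirrors `context if rolling else context + doc.choices[0]`.
def build_logprob_payload(prompt_manager, doc: Doc, rolling: bool) -> str:
    context = prompt_manager.prepare_prompt(doc)              # formatted prompt for this doc
    return context if rolling else context + doc.choices[0]   # continuation to be scored
# The endpoint is then queried with max_tokens=1 so only prefill logprobs are returned.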
dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) - batch_size = override_bs if override_bs is not None else BATCH_SIZE - results: List[str] = [] + docs: List[Doc], + ) -> List[ModelResponse]: + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) + batch_size = self.config.batch_size + results = [] for split in tqdm( dataset.splits_iterator(), @@ -475,8 +559,6 @@ def greedy_until( for batch in tqdm( dataloader, desc="Greedy generation", position=1, leave=False, disable=self.disable_tqdm ): - # the `returns_logits` flag is only used to filter the results, we always request the full details. - returns_logits = batch[0].use_logits num_samples = batch[0].num_samples if num_samples > 1: logger.error( @@ -487,30 +569,20 @@ def greedy_until( responses = asyncio.run(self._async_process_batch_generate(batch)) else: responses = self._process_batch_generate(batch) - for i, response in enumerate(responses): + for response in responses: results.append( - GenerativeResponse( - result=response.generated_text, - logits=[item.logprob for item in response.details.prefill] if returns_logits else None, - generated_tokens=[token.id for token in response.details.tokens], - truncated_tokens_count=max( - len(self.tokenizer.encode(batch[i].context)) - self.max_length, 0 - ), - padded_tokens_count=-1, + ModelResponse( + text=[response.generated_text], + output_tokens=[[token.id for token in response.details.tokens]], ) ) return dataset.get_original_order(results) - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - for request in requests: - request.tokenized_context = self.tok_encode(request.context) - request.tokenized_continuation = self.tok_encode(request.choice) - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) - batch_size = override_bs if override_bs is not None else BATCH_SIZE - results: List[str] = [] + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: + dataset = LoglikelihoodDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) + batch_size = self.config.batch_size + results = [] for split in tqdm( dataset.splits_iterator(), @@ -526,6 +598,7 @@ def loglikelihood( responses = asyncio.run(self._async_process_batch_logprob(batch)) else: responses = self._process_batch_logprob(batch) + for cur_request, response in zip(batch, responses): cont_toks = torch.tensor(cur_request.tokenized_continuation) len_choice = len(cont_toks) @@ -540,10 +613,10 @@ def loglikelihood( greedy_tokens = torch.tensor(logits).argmax(dim=-1) max_equal = (greedy_tokens == cont_toks).all().squeeze(0) results.append( - LoglikelihoodResponse( - result=(sum(logits), bool(max_equal)), + ModelResponse( + logprobs=(sum(logits), bool(max_equal)), input_tokens=[t.id for t in response.details.prefill[:-len_choice]], - generated_tokens=[t.id for t in response.details.prefill[-len_choice:]], + output_tokens=[t.id for t in response.details.prefill[-len_choice:]], truncated_tokens_count=-1, padded_tokens_count=-1, ) @@ -551,17 +624,11 @@ def loglikelihood( return dataset.get_original_order(results) - def loglikelihood_rolling( - self, requests: list[LoglikelihoodRollingRequest], override_bs=None - ) -> list[LoglikelihoodResponse]: + def loglikelihood_rolling(self, requests: list[Doc], override_bs=None) -> list[ModelResponse]: """This function is used to compute the log likelihood of the context for perplexity metrics.""" - 
for request in requests: - request.tokenized_context = [self.tokenizer.eos_token_id] - request.tokenized_continuation = self.tok_encode(request.context) - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) batch_size = override_bs if override_bs is not None else BATCH_SIZE - results: List[str] = [] + results: list[ModelResponse] = [] for split in tqdm( dataset.splits_iterator(), @@ -579,11 +646,12 @@ def loglikelihood_rolling( responses = asyncio.run(self._async_process_batch_logprob(batch, rolling=True)) else: responses = self._process_batch_logprob(batch, rolling=True) + for response in responses: logits = [t.logprob for t in response.details.tokens[:-1]] results.append( - LoglikelihoodResponse( + ModelResponse( result=sum(logits), input_tokens=[t.id for t in response.details.prefill], generated_tokens=[t.id for t in response.details.tokens[:-1]], @@ -593,10 +661,3 @@ def loglikelihood_rolling( ) return dataset.get_original_order(results) - - def loglikelihood_single_token( - self, - requests: list[LoglikelihoodSingleTokenRequest], - override_bs: Optional[int] = None, - ) -> list[LoglikelihoodSingleTokenResponse]: - raise ValueError("Endpoint models can't use single token metrics. Change the metric to the standard version") diff --git a/src/lighteval/models/endpoints/inference_providers_model.py b/src/lighteval/models/endpoints/inference_providers_model.py index 42a8b50a8..3e4a69c87 100644 --- a/src/lighteval/models/endpoints/inference_providers_model.py +++ b/src/lighteval/models/endpoints/inference_providers_model.py @@ -34,33 +34,52 @@ from lighteval.data import GenerativeTaskDataset from lighteval.models.abstract_model import LightevalModel from lighteval.models.endpoints.endpoint_model import ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc logger = logging.getLogger(__name__) class InferenceProvidersModelConfig(ModelConfig): - """Configuration for InferenceProvidersClient. - - Args: - model: Name or path of the model to use - provider: Name of the inference provider - timeout: Request timeout in seconds - proxies: Proxy configuration for requests - org_to_bill: Organisation to bill if not the user - generation_parameters: Parameters for text generation + """ + Configuration class for HuggingFace's inference providers (like Together AI, Anyscale, etc.). + + inference providers doc: https://huggingface.co/docs/inference-providers/en/index + + Attributes: + model_name (str): + Name or identifier of the model to use. + provider (str): + Name of the inference provider. Examples: "together", "anyscale", "runpod", etc. + timeout (int | None): + Request timeout in seconds. If None, uses provider default. + proxies (Any | None): + Proxy configuration for requests. Can be a dict or proxy URL string. + org_to_bill (str | None): + Organization to bill for API usage. If None, bills the user's account. + parallel_calls_count (NonNegativeInt): + Number of parallel API calls to make. Defaults to 10. + Higher values increase throughput but may hit rate limits. 
+ + Example: + ```python + config = InferenceProvidersModelConfig( + model_name="deepseek-ai/DeepSeek-R1-0528", + provider="together", + parallel_calls_count=5, + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + + Note: + - Requires HF API keys to be set in environment variable + - Different providers have different rate limits and pricing """ model_name: str @@ -88,7 +107,7 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None: model_name=config.model_name, model_sha="", model_dtype=None, - model_size="", + model_size=-1, ) self.model_name = config.model_name self.provider = config.provider @@ -109,15 +128,12 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None: try: self._tokenizer = AutoTokenizer.from_pretrained(self.model_name) except HfHubHTTPError: - logger.warning("Could not load model's tokenizer: {e}.") + logger.warning(f"Could not load model's tokenizer for the model {self.model_name}.") self._tokenizer = None - def _encode(self, text: str) -> dict: - if self._tokenizer: - enc = self._tokenizer(text=text) - return enc - logger.warning("Tokenizer is not loaded, can't encore the text, returning it as such.") - return text + self.prompt_manager = PromptManager( + use_chat_template=True, tokenizer=self.tokenizer, system_prompt=config.system_prompt + ) async def __call_api(self, prompt: List[dict], num_samples: int) -> Optional[ChatCompletionOutput]: """Make API call with exponential backoff retry logic. @@ -137,6 +153,11 @@ async def __call_api(self, prompt: List[dict], num_samples: int) -> Optional[Cha "n": num_samples, } kwargs.update(self.generation_parameters.to_inference_providers_dict()) + if kwargs.get("temperature") == 0.0 and num_samples > 1: + raise ValueError( + "Temperature is set to 0.0, but num_samples > 1. " + "This is not supported by the inference providers API." + ) response: ChatCompletionOutput = await self.client.chat.completions.create(**kwargs) return response except Exception as e: @@ -175,8 +196,8 @@ async def bounded_api_call(prompt, num_samples): def greedy_until( self, - requests: list[GreedyUntilRequest], - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -187,7 +208,7 @@ def greedy_until( Returns: list[GenerativeResponse]: list of generated responses. 
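# The guard added above rejects sampling several generations under greedy decoding,
# since temperature 0.0 would make every sample identical. Illustrative values:
temperature, num_samples = 0.0, 4
if temperature == 0.0 and num_samples > 1:
    raise ValueError("Temperature is set to 0.0, but num_samples > 1.")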
""" - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) results = [] for split in tqdm( @@ -197,20 +218,18 @@ def greedy_until( position=0, disable=False, # self.disable_tqdm, ): - contexts = [sample.context for sample in split] + contexts = [self.prompt_manager.prepare_prompt_api(doc) for doc in split] num_samples = split[0].num_samples responses = asyncio.run(self.__call_api_parallel(contexts, num_samples)) - for response in responses: + for response, context in zip(responses, contexts): result: list[str] = [choice.message.content for choice in response.choices] - cur_response = GenerativeResponse( + cur_response = ModelResponse( # In empty responses, the model should return an empty string instead of None - result=result if result[0] else [""], - logits=None, - generated_tokens=[], - input_tokens=[], + text=result if result[0] else [""], + input=context, ) results.append(cur_response) @@ -233,20 +252,12 @@ def max_length(self) -> int: logger.warning("Tokenizer was not correctly loaded. Max model context length is assumed to be 30K tokens") return 30000 - def loglikelihood(self, requests: list[LoglikelihoodRequest]) -> list[LoglikelihoodResponse]: + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: """Tokenize the context and continuation and compute the log likelihood of those tokenized sequences. """ raise NotImplementedError - def loglikelihood_rolling(self, requests: list[LoglikelihoodRollingRequest]) -> list[LoglikelihoodResponse]: + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: """This function is used to compute the log likelihood of the context for perplexity metrics.""" raise NotImplementedError - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest] - ) -> list[LoglikelihoodSingleTokenResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. - """ - raise NotImplementedError diff --git a/src/lighteval/models/endpoints/openai_model.py b/src/lighteval/models/endpoints/openai_model.py deleted file mode 100644 index 6a6e20c29..000000000 --- a/src/lighteval/models/endpoints/openai_model.py +++ /dev/null @@ -1,292 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -import logging -import os -import time -from concurrent.futures import ThreadPoolExecutor -from dataclasses import dataclass -from typing import Optional - -from tqdm import tqdm -from transformers import AutoTokenizer - -from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset -from lighteval.models.abstract_model import LightevalModel -from lighteval.models.endpoints.endpoint_model import ModelInfo -from lighteval.models.model_input import GenerationParameters -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) -from lighteval.utils.imports import is_openai_available - - -logger = logging.getLogger(__name__) - - -if is_openai_available(): - import logging - - import tiktoken - from openai import OpenAI - - logging.getLogger("openai").setLevel(logging.ERROR) - logging.getLogger("httpx").setLevel(logging.ERROR) - - -@dataclass -class OpenAIModelConfig: - model: str - generation_parameters: GenerationParameters = None - base_url: str = "https://api.openai.com/v1" - api_key: str = os.environ.get("OPENAI_API_KEY", None) - - def __post_init__(self): - if not self.generation_parameters: - self.generation_parameters = GenerationParameters() - - @classmethod - def from_path(cls, path: str) -> "OpenAIModelConfig": - import yaml - - with open(path, "r") as f: - loaded_file = yaml.safe_load(f) - config = loaded_file["model"] - api = loaded_file.get("api", {}) - generation_parameters = GenerationParameters.from_dict(config) - return cls(model=config["model_name"], generation_parameters=generation_parameters, **api) - - -class OpenAIClient(LightevalModel): - _DEFAULT_MAX_LENGTH: int = 4096 - - def __init__(self, config: OpenAIModelConfig, env_config) -> None: - self.client = OpenAI(api_key=config.api_key, base_url=config.base_url) - self.config = config - self.generation_parameters = config.generation_parameters - self.sampling_params = self.generation_parameters.to_vllm_openai_dict() - - self.model_info = ModelInfo( - model_name=config.model, - model_sha="", - model_dtype=None, - model_size="", - ) - self.API_MAX_RETRY = 5 - self.API_RETRY_SLEEP = 3 - self.API_RETRY_MULTIPLIER = 2 - self.CONCURENT_CALLS = 100 - self.model = config.model - try: - self._tokenizer = tiktoken.encoding_for_model(self.model) - except KeyError: - self._tokenizer = AutoTokenizer.from_pretrained(self.model) - self.pairwise_tokenization = False - - def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, logit_bias): - for _ in range(self.API_MAX_RETRY): - try: - response_format = {"response_format": {"type": "text"}} if "openai" in self.config.base_url else {} - response = self.client.chat.completions.create( - model=self.model, - messages=[{"role": "user", "content": prompt}], - max_tokens=max_new_tokens if max_new_tokens > 0 else None, - logprobs=return_logits, - logit_bias=logit_bias, - n=num_samples, - **self.sampling_params, - **response_format, - ) - self.API_RETRY_SLEEP = 3 - return response - except Exception as e: - logger.warning(f"{type(e), e}") - time.sleep(self.API_RETRY_SLEEP) - self.API_RETRY_SLEEP = self.API_RETRY_SLEEP**self.API_RETRY_MULTIPLIER - raise Exception("Failed to get response from the API") - - def __call_api_parallel( - self, - prompts, - return_logits: bool | list[bool], - max_new_tokens: int | list[int], - num_samples: int | 
list[int], - logit_bias: list[dict[int, float]] | None = None, - ): - results = [] - - return_logitss = [return_logits for _ in prompts] if not isinstance(return_logits, list) else return_logits - max_new_tokenss = [max_new_tokens for _ in prompts] if not isinstance(max_new_tokens, list) else max_new_tokens - num_sampless = [num_samples for _ in prompts] if not isinstance(num_samples, list) else num_samples - logit_biass = [logit_bias for _ in prompts] if logit_bias is None else logit_bias - - assert len(prompts) == len(return_logitss) == len(max_new_tokenss) == len(num_sampless) == len(logit_biass), ( - "Length of prompts, return_logitss, max_new_tokenss, num_sampless, logit_biass should be same" - ) - - with ThreadPoolExecutor(self.CONCURENT_CALLS) as executor: - for entry in tqdm( - executor.map(self.__call_api, prompts, return_logitss, max_new_tokenss, num_sampless, logit_biass), - total=len(prompts), - ): - results.append(entry) - - if None in results: - raise ValueError("Some entries are not annotated due to errors in annotate_p, please inspect and retry.") - - return results - - def greedy_until( - self, - requests: list[GreedyUntilRequest], - override_bs: Optional[int] = None, - ) -> list[GenerativeResponse]: - """ - Generates responses using a greedy decoding strategy until certain ending conditions are met. - - Args: - requests (list[Request]): list of requests containing the context and ending conditions. - override_bs (int, optional): Override the batch size for generation. Defaults to None. - - Returns: - list[GenerativeResponse]: list of generated responses. - """ - for request in requests: - request.tokenized_context = self.tok_encode(request.context) - - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) - results = [] - - for split in tqdm( - dataset.splits_iterator(), - total=dataset.num_dataset_splits, - desc="Splits", - position=0, - disable=False, # self.disable_tqdm, - ): - max_new_tokens = split[0].generation_size # could be none - return_logits = split[0].use_logits - num_samples = split[0].num_samples - contexts = [sample.context for sample in split] - - responses = self.__call_api_parallel(contexts, return_logits, max_new_tokens, num_samples) - - for response in responses: - result: list[str] = [output.message.content for output in response.choices] - - cur_response = GenerativeResponse( - result=result, - logits=None, - generated_tokens=[], - input_tokens=[], - ) - results.append(cur_response) - - return dataset.get_original_order(results) - - @property - def tokenizer(self): - return self._tokenizer - - def tok_encode(self, text: str): - return self.tokenizer.encode(text) - - @property - def add_special_tokens(self) -> bool: - return False - - @property - def max_length(self) -> int: - """Return the maximum sequence length of the model.""" - return 4096 - - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. 
- """ - for request in requests: - if request.context == "": - request.tokenized_context = [" "] - request.tokenized_continuation = self.tok_encode(request.choice) - else: - # The following line is mandatory for compatibility with the harness - request.tokenized_context, request.tokenized_continuation = self.tok_encode_pair( - request.context, request.choice, pairwise=self.pairwise_tokenization - ) - return self._loglikelihood_tokens(requests) - - def _loglikelihood_tokens( - self, - requests: list[LoglikelihoodRequest], - ) -> list[LoglikelihoodResponse]: - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=1) - results = [] - - for split in tqdm(dataset.splits_iterator()): - inputs = [sample.context for sample in split] - max_new_tokens = [len(sample.tokenized_continuation) for sample in split] - - assert all(new_tokens == 1 for new_tokens in max_new_tokens), ( - "Only single token continuations are supported when using openai API." - ) - - logit_biases = [dict.fromkeys(sample.tokenized_continuation, 100) for sample in split] - - outputs = self.__call_api_parallel( - inputs, return_logits=True, max_new_tokens=max_new_tokens, num_samples=1, logit_bias=logit_biases - ) - - for i, output in enumerate(outputs): - input = split[i] - continuation_logprobs = [content.logprob for content in output.choices[0].logprobs.content] - answer = LoglikelihoodResponse( - input_tokens=input.tokenized_context + input.tokenized_continuation, - generated_tokens=input.tokenized_continuation, - result=(sum(continuation_logprobs), None), - ) - results.append(answer) - - return dataset.get_original_order(results) - - def loglikelihood_rolling( - self, requests: list[LoglikelihoodRollingRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - """This function is used to compute the log likelihood of the context for perplexity metrics.""" - raise NotImplementedError - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodSingleTokenResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. - """ - raise NotImplementedError diff --git a/src/lighteval/models/endpoints/tgi_model.py b/src/lighteval/models/endpoints/tgi_model.py index c948a1ced..662717304 100644 --- a/src/lighteval/models/endpoints/tgi_model.py +++ b/src/lighteval/models/endpoints/tgi_model.py @@ -26,7 +26,7 @@ import requests from huggingface_hub import TextGenerationInputGenerateParameters, TextGenerationInputGrammarType, TextGenerationOutput -from transformers import AutoTokenizer +from transformers.models.auto.tokenization_auto import AutoTokenizer from lighteval.models.endpoints.endpoint_model import InferenceEndpointModel, ModelInfo from lighteval.models.utils import ModelConfig @@ -35,6 +35,10 @@ if is_tgi_available(): from text_generation import AsyncClient +else: + from unittest.mock import Mock + + AsyncClient = Mock() BATCH_SIZE = 50 @@ -47,6 +51,38 @@ def divide_chunks(array, n): class TGIModelConfig(ModelConfig): + """ + Configuration class for Text Generation Inference (TGI) backend. + + doc: https://huggingface.co/docs/text-generation-inference/en/index + + This configuration is used to connect to TGI servers that serve HuggingFace models + using the text-generation-inference library. TGI provides high-performance inference + with features like continuous batching and efficient memory management. 
+ + Attributes: + inference_server_address (str | None): + Address of the TGI server. Format: "http://host:port" or "https://host:port". + Example: "http://localhost:8080" + inference_server_auth (str | None): + Authentication token for the TGI server. If None, no authentication is used. + model_name (str | None): + Optional model name override. If None, uses the model name from server info. + + Example: + ```python + config = TGIModelConfig( + inference_server_address="http://localhost:8080", + inference_server_auth="your-auth-token", + model_name="meta-llama/Llama-3.1-8B-Instruct", + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + """ + inference_server_address: str | None inference_server_auth: str | None model_name: str | None @@ -71,8 +107,8 @@ def __init__(self, config: TGIModelConfig) -> None: self.model_info = requests.get(f"{config.inference_server_address}/info", headers=headers).json() if "model_id" not in self.model_info: raise ValueError("Error occurred when fetching info: " + str(self.model_info)) - if config.model_id: - self.model_info["model_id"] = config.model_id + if config.model_name: + self.model_info["model_id"] = config.model_name self._tokenizer = AutoTokenizer.from_pretrained(self.model_info["model_id"]) self._add_special_tokens = True self.use_async = True @@ -131,7 +167,7 @@ def max_length(self) -> int: @property def disable_tqdm(self) -> bool: - False + return False def cleanup(self): pass diff --git a/src/lighteval/models/litellm_model.py b/src/lighteval/models/litellm_model.py index 969b0dc7e..641991a3a 100644 --- a/src/lighteval/models/litellm_model.py +++ b/src/lighteval/models/litellm_model.py @@ -23,25 +23,16 @@ import logging import time from concurrent.futures import ThreadPoolExecutor -from typing import Optional from tqdm import tqdm from lighteval.data import GenerativeTaskDataset from lighteval.models.abstract_model import LightevalModel from lighteval.models.endpoints.endpoint_model import ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc from lighteval.utils.imports import is_litellm_available @@ -51,15 +42,58 @@ import litellm from litellm import encode from litellm.caching.caching import Cache - from litellm.utils import ModelResponse + from litellm.utils import ModelResponse as LitellmModelResponse logging.getLogger("LiteLLM").setLevel(logging.WARNING) logging.getLogger("LiteLLM").handlers.clear() litellm.cache = Cache(type="disk") +else: + from unittest.mock import Mock + + litellm = Mock() + encode = Mock() + LitellmModelResponse = Mock() class LiteLLMModelConfig(ModelConfig): + """ + Configuration class for LiteLLM unified API client. + + This configuration is used to connect to various LLM providers through the LiteLLM + unified API. LiteLLM provides a consistent interface to multiple providers including + OpenAI, Anthropic, Google, and many others. + + litellm doc: https://docs.litellm.ai/docs/ + + Attributes: + model_name (str): + Model identifier. 
Can include provider prefix (e.g., "gpt-4", "claude-3-sonnet") + or use provider/model format (e.g., "openai/gpt-4", "anthropic/claude-3-sonnet"). + provider (str | None): + Optional provider name override. If None, inferred from model_name. + Examples: "openai", "anthropic", "google", "cohere", etc. + base_url (str | None): + Custom base URL for the API. If None, uses provider's default URL. + Useful for using custom endpoints or local deployments. + api_key (str | None): + API key for authentication. If None, reads from environment variables. + Environment variable names are provider-specific (e.g., OPENAI_API_KEY). + + Example: + ```python + config = LiteLLMModelConfig( + model_name="gpt-4", + provider="openai", + base_url="https://api.openai.com/v1", + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + """ + model_name: str provider: str | None = None base_url: str | None = None @@ -78,7 +112,7 @@ def __init__(self, config) -> None: model_name=config.model_name, model_sha="", model_dtype=None, - model_size="", + model_size=-1, ) self.model = config.model_name self.provider = config.provider or config.model_name.split("/")[0] @@ -89,12 +123,15 @@ def __init__(self, config) -> None: self.API_MAX_RETRY = 5 self.API_RETRY_SLEEP = 3 self.API_RETRY_MULTIPLIER = 2 - self.CONCURENT_CALLS = 20 # 100 leads to hitting Anthropic rate limits + self.CONCURENT_CALLS = 10 # 100 leads to hitting Anthropic rate limits self._tokenizer = encode self.pairwise_tokenization = False litellm.drop_params = True litellm.set_verbose = False + self.prompt_manager = PromptManager( + use_chat_template=True, tokenizer=self.tokenizer, system_prompt=config.system_prompt + ) def _prepare_stop_sequence(self, stop_sequence): """Prepare and validate stop sequence.""" @@ -116,7 +153,7 @@ def _prepare_max_new_tokens(self, max_new_tokens): def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_sequence): # noqa: C901 """Make API call with retries.""" - response = ModelResponse() + response = LitellmModelResponse() for attempt in range(self.API_MAX_RETRY): try: stop_sequence = self._prepare_stop_sequence(stop_sequence) @@ -137,7 +174,7 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_se } if num_samples > 1 and self.generation_parameters.temperature == 0: - logger.warning( + raise ValueError( "num_samples > 1 but temperature is set to 0, this will not sample different outputs." ) @@ -164,7 +201,7 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_se ) if error_string in e.__dict__["message"]: logger.warning(f"{error_string}. 
Returning empty response.") - return ModelResponse() + return LitellmModelResponse() except Exception as e: wait_time = min(64, self.API_RETRY_SLEEP * (2**attempt)) # Exponential backoff with max 64s logger.warning( @@ -173,13 +210,13 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_se time.sleep(wait_time) logger.error(f"API call failed after {self.API_MAX_RETRY} attempts, returning empty response.") - return ModelResponse() + return LitellmModelResponse() def __call_api_parallel( self, prompts, return_logits: bool | list[bool], - max_new_tokens: int | list[int], + max_new_tokens: int | list[int] | None, num_samples: int | list[int], stop_sequence: list[str] | None = None, ): @@ -216,9 +253,8 @@ def __call_api_parallel( def greedy_until( self, - requests: list[GreedyUntilRequest], - override_bs: Optional[int] = None, - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -229,10 +265,7 @@ def greedy_until( Returns: list[GenerativeResponse]: list of generated responses. """ - for request in requests: - request.tokenized_context = self.tok_encode(request.context) - - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) results = [] for split in tqdm( @@ -242,23 +275,26 @@ def greedy_until( position=0, disable=self.disable_tqdm, ): - contexts = [sample.context for sample in split] + contexts = [self.prompt_manager.prepare_prompt_api(doc) for doc in dataset] max_new_tokens = split[0].generation_size # could be none return_logits = split[0].use_logits num_samples = split[0].num_samples - stop_sequence = requests[0].stop_sequence + stop_sequence = split[0].stop_sequences + + if num_samples > 1 and self.generation_parameters.temperature == 0: + raise ValueError( + "num_samples > 1 is not supported with temperature=0, please set temperature > 0 or use non sampling metrics." 
+ ) responses = self.__call_api_parallel(contexts, return_logits, max_new_tokens, num_samples, stop_sequence) - for response in responses: + for response, context in zip(responses, contexts): result: list[str] = [choice.message.content for choice in response.choices] - cur_response = GenerativeResponse( + cur_response = ModelResponse( # In empty responses, the model should return an empty string instead of None - result=result if result[0] else [""], - logits=None, - generated_tokens=[], - input_tokens=[], + text=result if result[0] else [""], + input=context, ) results.append(cur_response) @@ -268,19 +304,6 @@ def greedy_until( def tokenizer(self): return self._tokenizer - def _encode(self, text: str): - enc = encode(model=self.model, text=text) - if hasattr(enc, "ids"): - return enc.ids - return enc - - def tok_encode(self, text: str | list[str]): - if isinstance(text, list): - toks = [self._encode(t["content"]) for t in text] - toks = [tok for tok in toks if tok] - return toks - return self._encode(text) - @property def add_special_tokens(self) -> bool: return False @@ -290,24 +313,12 @@ def max_length(self) -> int: """Return the maximum sequence length of the model.""" return 4096 - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: """Tokenize the context and continuation and compute the log likelihood of those tokenized sequences. """ raise NotImplementedError - def loglikelihood_rolling( - self, requests: list[LoglikelihoodRollingRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: """This function is used to compute the log likelihood of the context for perplexity metrics.""" raise NotImplementedError - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodSingleTokenResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. - """ - raise NotImplementedError diff --git a/src/lighteval/models/model_loader.py b/src/lighteval/models/model_loader.py index ed497165a..88799824d 100644 --- a/src/lighteval/models/model_loader.py +++ b/src/lighteval/models/model_loader.py @@ -35,7 +35,6 @@ InferenceProvidersClient, InferenceProvidersModelConfig, ) -from lighteval.models.endpoints.openai_model import OpenAIClient, OpenAIModelConfig from lighteval.models.endpoints.tgi_model import ModelClient, TGIModelConfig from lighteval.models.litellm_model import LiteLLMClient, LiteLLMModelConfig from lighteval.models.sglang.sglang_model import SGLangModel, SGLangModelConfig @@ -51,7 +50,6 @@ NO_TGI_ERROR_MSG, NO_VLLM_ERROR_MSG, is_litellm_available, - is_openai_available, is_sglang_available, is_tgi_available, is_vllm_available, @@ -64,20 +62,16 @@ def load_model( # noqa: C901 config: ModelConfig, ) -> LightevalModel: - """Will load either a model from an inference server or a model from a checkpoint, depending - on the config type. + """ + Load a model from a checkpoint, depending on the config type. 
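A usage sketch of the dispatch-on-config-type behaviour described above; the model id is a placeholder and the example assumes the corresponding backend extra is installed:

```python
from lighteval.models.litellm_model import LiteLLMModelConfig
from lighteval.models.model_loader import load_model

config = LiteLLMModelConfig(model_name="openai/gpt-4o")  # placeholder model id
model = load_model(config)  # returns a LiteLLMClient, selected from the config type
```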
Args: - args (Namespace): arguments passed to the program - accelerator (Accelerator): Accelerator that will be used by the model + config (ModelConfig): configuration of the model to load Raises: - ValueError: If you try to load a model from an inference server and from a checkpoint at the same time ValueError: If you try to have both the multichoice continuations start with a space and not to start with a space - ValueError: If you did not specify a base model when using delta weights or adapter weights - Returns: - Union[TransformersModel, AdapterModel, DeltaModel, ModelClient]: The model that will be evaluated + LightevalModel: The model that will be evaluated """ # Inference server loading if isinstance(config, TGIModelConfig): @@ -104,9 +98,6 @@ def load_model( # noqa: C901 if isinstance(config, SGLangModelConfig): return load_sglang_model(config) - if isinstance(config, OpenAIModelConfig): - return load_openai_model(config) - if isinstance(config, LiteLLMModelConfig): return load_litellm_model(config) @@ -133,15 +124,6 @@ def load_litellm_model(config: LiteLLMModelConfig): return model -def load_openai_model(config: OpenAIModelConfig): - if not is_openai_available(): - raise ImportError() - - model = OpenAIClient(config) - - return model - - def load_custom_model(config: CustomModelConfig): logger.warning(f"Executing custom model code loaded from {config.model_definition_file_path}.") diff --git a/src/lighteval/models/model_output.py b/src/lighteval/models/model_output.py index 041687bae..6f0b9884e 100644 --- a/src/lighteval/models/model_output.py +++ b/src/lighteval/models/model_output.py @@ -21,57 +21,110 @@ # SOFTWARE. from dataclasses import dataclass, field -from typing import Optional, Union +from typing import Optional import torch @dataclass class ModelResponse: - result: Union[tuple, list, str] - input_tokens: list[int] = field(default_factory=list) # model inputs - generated_tokens: list[int] = field(default_factory=list) # model generations - truncated_tokens_count: Optional[int] = 0 # How many tokens truncated - padded_tokens_count: Optional[int] = 0 # How many tokens of padding - - def get_result_for_eval(self): - raise NotImplementedError() - - -@dataclass -class LoglikelihoodResponse(ModelResponse): - # Float: Total log prob of the continuation - # Optional(Bool): Whether the continuation is greedy (= all the tokens in the continuation are argmax of prob) - result: Union[tuple[float, bool], float] = field(default_factory=tuple[float, bool]) - - def get_result_for_eval(self): - return self.result - - -@dataclass -class LoglikelihoodSingleTokenResponse(ModelResponse): - # Log probs of the various single token options - result: list[float] = field(default_factory=list) + """ + A class to represent the response from a model during evaluation. + + This dataclass contains all the information returned by a model during inference, + including generated text, log probabilities, token information, and metadata. + Different attributes are required for different types of evaluation metrics. + + Attributes: + input (str | list | None): + The original input prompt or context that was fed to the model. + Used for debugging and analysis purposes. + + text (list[str]): + The generated text responses from the model. Each element represents + one generation (useful when num_samples > 1). + **Required for**: Generative metrics, exact match, llm as a judge, etc. + + logprobs (list[float]): + Log probabilities of the generated tokens or sequences. 
+ **Required for**: loglikelihood and perplexity metrics. + + argmax_logits_eq_gold (list[bool]): + Whether the argmax logits match the gold/expected text. + Used for accuracy calculations in multiple choice and classification tasks. + **Required for**: certain loglikelihood metrics. + + unconditioned_logprobs (Optional[list[float]]): + Log probabilities from an unconditioned model (e.g., without context). + Used for PMI (Pointwise Mutual Information) normalization. + **Required for**: PMI metrics. + + Usage Examples: + + **For generative tasks (text completion, summarization):** + ```python + response = ModelResponse( + text=["The capital of France is Paris."], + input_tokens=[1, 2, 3, 4], + output_tokens=[[5, 6, 7, 8]] + ) + ``` + + **For multiple choice tasks:** + ```python + response = ModelResponse( + logprobs=[-0.5, -1.2, -2.1, -1.8], # Logprobs for each choice + argmax_logits_eq_gold=[False, False, False, False], # Whether correct choice was selected + input_tokens=[1, 2, 3, 4], + output_tokens=[[5], [6], [7], [8]] + ) + ``` + + **For perplexity calculation:** + ```python + response = ModelResponse( + text=["The model generated this text."], + logprobs=[-1.2, -0.8, -1.5, -0.9, -1.1], # Logprobs for each token + input_tokens=[1, 2, 3, 4, 5], + output_tokens=[[6], [7], [8], [9], [10]] + ) + ``` + + **For PMI analysis:** + ```python + response = ModelResponse( + text=["The answer is 42."], + logprobs=[-1.1, -0.9, -1.3, -0.7], # Conditioned logprobs + unconditioned_logprobs=[-2.1, -1.8, -2.3, -1.5], # Unconditioned logprobs + input_tokens=[1, 2, 3, 4], + output_tokens=[[5], [6], [7], [8]] + ) + ``` + + Notes: + - For most evaluation tasks, only a subset of attributes is required + - The `text` attribute is the most commonly used for generative tasks + - `logprobs` are essential for probability-based metrics like perplexity + - `argmax_logits_eq_gold` is specifically for certain multiple choice/classification tasks + - Token-level attributes (`input_tokens`, `output_tokens`) are useful for debugging + - Truncation and padding counts help understand model behavior with long inputs + """ + + input: str | list | None = None + text: list[str] = field(default_factory=list) # The text of the response + logprobs: list[float] = field(default_factory=list) # Log probabilities of the response + argmax_logits_eq_gold: list[bool] = field(default_factory=list) # Whether the argmax logits match the gold text + logits: list[list[float]] | None = None # Logits of the response, if applicable + + truncated_tokens_count: int = 0 # How many tokens truncated + padded_tokens_count: int = 0 # How many tokens of padding - def get_result_for_eval(self): - return self.result - - -@dataclass -class GenerativeResponse(ModelResponse): - result: list[str] = field(default_factory=str) # generated text continuation - logits: Optional[list[float]] = None # Generated text logits - - def get_result_for_eval(self): - return self.result - - -@dataclass -class GenerativeMultiturnResponse(ModelResponse): - result: list[str] = field(default_factory=list) + input_tokens: list[int] = field(default_factory=list) # model inputs + output_tokens: list[list[int]] = field(default_factory=list) # model generations - def get_result_for_eval(self): - return self.result + unconditioned_logprobs: Optional[list[float]] = ( + None # Log probabilities of the unconditioned model (if applicable) + ) @dataclass diff --git a/src/lighteval/models/sglang/sglang_model.py b/src/lighteval/models/sglang/sglang_model.py index d15133dfd..cdf91be99 100644 
--- a/src/lighteval/models/sglang/sglang_model.py +++ b/src/lighteval/models/sglang/sglang_model.py @@ -30,17 +30,11 @@ from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig, _simplify_name -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc from lighteval.utils.imports import is_sglang_available -from lighteval.utils.utils import as_list logger = logging.getLogger(__name__) @@ -57,6 +51,66 @@ class SGLangModelConfig(ModelConfig): + """ + Configuration class for SGLang inference engine. + + This configuration is used to load and configure models using the SGLang inference engine, + which provides high-performance inference. + + sglang doc: https://docs.sglang.ai/index.html# + + Attributes: + model_name (str): + HuggingFace Hub model ID or path to the model to load. + load_format (str): + The format of the model weights to load. choices: auto, pt, safetensors, npcache, dummy, tensorizer, sharded_state, gguf, bitsandbytes, mistral, runai_streamer. + dtype (str): + Data type for model weights. Defaults to "auto". Options: "auto", "float16", "bfloat16", "float32". + tp_size (PositiveInt): + Number of GPUs to use for tensor parallelism. Defaults to 1. + dp_size (PositiveInt): + Number of GPUs to use for data parallelism. Defaults to 1. + context_length (PositiveInt | None): + Maximum context length for the model. + random_seed (PositiveInt | None): + Random seed for reproducibility. Defaults to 1234. + trust_remote_code (bool): + Whether to trust remote code when loading models. Defaults to False. + use_chat_template (bool): + Whether to use chat templates for conversation-style prompts. Defaults to False. + device (str): + Device to load the model on. Defaults to "cuda". + skip_tokenizer_init (bool): + Whether to skip tokenizer initialization. Defaults to False. + kv_cache_dtype (str): + Data type for key-value cache. Defaults to "auto". + add_special_tokens (bool): + Whether to add special tokens during tokenization. Defaults to True. + pairwise_tokenization (bool): + Whether to tokenize context and continuation separately for loglikelihood evals. Defaults to False. + sampling_backend (str | None): + Sampling backend to use. If None, uses default. + attention_backend (str | None): + Attention backend to use. If None, uses default. + mem_fraction_static (PositiveFloat): + Fraction of GPU memory to use for static allocation. Defaults to 0.8. + chunked_prefill_size (PositiveInt): + Size of chunks for prefill operations. Defaults to 4096. 
+ + Example: + ```python + config = SGLangModelConfig( + model_name="meta-llama/Llama-3.1-8B-Instruct", + tp_size=2, + context_length=8192, + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + """ + model_name: str load_format: str = "auto" dtype: str = "auto" @@ -99,6 +153,7 @@ def __init__( self.sampling_backend = config.sampling_backend self.attention_backend = config.attention_backend self.pairwise_tokenization = config.pairwise_tokenization + self.prompt_manager = PromptManager(self.use_chat_template, self.tokenizer, config.system_prompt) @property def tokenizer(self): @@ -157,9 +212,8 @@ def _create_auto_tokenizer(self, config: SGLangModelConfig): def greedy_until( self, - requests: list[GreedyUntilRequest], - override_bs: Optional[int] = None, - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -170,11 +224,7 @@ def greedy_until( Returns: list[GenerateReturn]: list of generated responses. """ - for request in requests: - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - request.tokenized_context = self.tok_encode(request.context) - - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) results = [] for split in tqdm( @@ -187,13 +237,13 @@ def greedy_until( if self.use_chat_template: stop_tokens = [] else: - stop_tokens = split[0].stop_sequence + stop_tokens = split[0].stop_sequences max_new_tokens = split[0].generation_size # could be none num_samples = split[0].num_samples - context = [sample.context for sample in split] - tokenized = self.tokenizer(context, add_special_tokens=self.add_special_tokens) + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in split] + tokenized = self.tokenizer(contexts, add_special_tokens=self.add_special_tokens) # The main question for this step is the following: # Would we rather truncate the prompt to allow generation to go to max_new_tokens, at the risk @@ -233,10 +283,10 @@ def greedy_until( output_token_ids = [output[1] for output in output_token_logprobs] logprobs = [output[0] for output in output_token_logprobs] result = [sglang_output["text"]] - cur_response = GenerativeResponse( - result=result, - logits=logprobs, - generated_tokens=list(output_token_ids), + cur_response = ModelResponse( + text=result, + logprobs=logprobs, + output_tokens=list(output_token_ids), input_tokens=input_token_ids, ) results.append(cur_response) @@ -249,7 +299,7 @@ def _generate( stop_tokens: Optional[list[str]] = None, num_samples: int = 1, generate: bool = True, - ) -> list[GenerativeResponse]: + ) -> list: """Contains the actual logic of the generation.""" logprob_start_len = None @@ -259,7 +309,9 @@ def _generate( self.sampling_params["stop"] = stop_tokens self.sampling_params["n"] = num_samples if num_samples > 1 and self.sampling_params["temperature"] == 0: - logger.warning("num_samples > 1 but temperature is set to 0, this will not sample different outputs.") + raise ValueError( + "num_samples > 1 is not supported with temperature=0, please set temperature > 0 or use non sampling metrics." 
+ ) else: self.sampling_params["max_new_tokens"] = 1 self.sampling_params["temperature"] = 0 @@ -275,59 +327,76 @@ def _generate( ) return outputs - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - for request in requests: - if request.context == "": - request.tokenized_context = [self.tokenizer.eos_token_id] - request.tokenized_continuation = self.tok_encode(request.choice) - else: - # The following line is mandatory for compatibility with the harness - request.tokenized_context, request.tokenized_continuation = self.tok_encode_pair( - request.context, request.choice, pairwise=self.pairwise_tokenization - ) - - return self._loglikelihood_tokens(requests, override_bs=override_bs) + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: + return self._loglikelihood_tokens(docs) def _loglikelihood_tokens( self, - requests: list[LoglikelihoodRequest], - override_bs: int = -1, - return_bool_score: bool = True, - rolling: bool = False, - ) -> list[LoglikelihoodResponse]: - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=1) + docs: list[Doc], + ) -> list[ModelResponse]: + dataset = LoglikelihoodDataset(requests=docs, num_dataset_splits=1) res = [] for split in tqdm(dataset.splits_iterator(), disable=False): + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in split] # the last token is an eos token, so we don't need to add it - inputs = [sample.tokenized_context + sample.tokenized_continuation for sample in split] # Left truncate the inputs to the maximum length + inputs = [] + tokenized_continuations_batch = [] + tokenized_contexts_batch = [] + + for context, doc in zip(contexts, dataset): + tokenized_contexts, tokenized_continuations = self.tok_encode_pair(context, doc.choices, pairwise=True) + for tokenized_context, tokenized_continuation in zip(tokenized_contexts, tokenized_continuations): + inputs.append(tokenized_context + tokenized_continuation) + tokenized_continuations_batch.append(tokenized_continuation) + tokenized_contexts_batch.append(tokenized_context) + inputs = [input[-self.max_length :] for input in inputs] outputs = self._generate(inputs, generate=False) - for i, output in enumerate(outputs): - input = split[i] - continuation_logprobs = [] - meta_info = output["meta_info"] - input_token_logprobs = meta_info["input_token_logprobs"][::-1] - input_top_logprobs = meta_info["input_top_logprobs"][::-1] - input_top_logprobs = input_top_logprobs[: len(input.tokenized_continuation)] - continuation_logprobs.append(input_token_logprobs[: len(input.tokenized_continuation)]) - bool_score = all( - top[0][1] == input[1] for top, input in zip(input_top_logprobs, continuation_logprobs[0]) - ) - answer = LoglikelihoodResponse( - input_tokens=input.tokenized_context + input.tokenized_continuation, - generated_tokens=input.tokenized_continuation, - result=(sum(item[0] for item in continuation_logprobs[0]), bool_score), + flat_index = 0 + for doc in dataset: + # all the element generated from one doc (one element per choice) + outputs_doc: list[dict] = outputs[flat_index : flat_index + len(doc.choices)] + tokenized_continuations_doc: list[list[int]] = tokenized_continuations_batch[ + flat_index : flat_index + len(doc.choices) + ] + tokenized_contexts_doc: list[list[int]] = tokenized_contexts_batch[ + flat_index : flat_index + len(doc.choices) + ] + logprobs_doc = [] + argmax_doc = [] + output_tokens_doc = [] + input_tokens_doc = [] + + for output, context, continuation 
in zip( + outputs_doc, tokenized_contexts_doc, tokenized_continuations_doc + ): + meta_info = output["meta_info"] + + input_top_logprobs = meta_info["input_top_logprobs"][::-1] + input_token_logprobs = meta_info["input_token_logprobs"][::-1] + input_top_logprobs = input_top_logprobs[: len(continuation)] + logprobs = input_token_logprobs[: len(continuation)] + bool_score = all(top[0][1] == input[1] for top, input in zip(input_top_logprobs, logprobs)) + logprobs = [logprob[0] for logprob in logprobs] + logprobs_doc.append(logprobs) + argmax_doc.append(bool_score) + output_tokens_doc.append(output["text"]) + input_tokens_doc.append(context + continuation) + + answer = ModelResponse( + input_tokens=input_tokens_doc, + output_tokens=output_tokens_doc, + logprobs=logprobs_doc, + argmax_logits_eq_gold=argmax_doc, ) res.append(answer) return dataset.get_original_order(res) - def loglikelihood_rolling(): - pass + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: + raise NotImplementedError() - def loglikelihood_single_token(): - pass + def loglikelihood_single_token(self, docs: list[Doc]) -> list[ModelResponse]: + raise NotImplementedError() diff --git a/src/lighteval/models/transformers/adapter_model.py b/src/lighteval/models/transformers/adapter_model.py index e6df27cf2..dc986573f 100644 --- a/src/lighteval/models/transformers/adapter_model.py +++ b/src/lighteval/models/transformers/adapter_model.py @@ -40,7 +40,25 @@ class AdapterModelConfig(TransformersModelConfig): - # Adapter models have the specificity that they look at the base model (= the parent) for the tokenizer and config + """ + Configuration class for PEFT (Parameter-Efficient Fine-Tuning) adapter models. + + This configuration is used to load models that have been fine-tuned using PEFT adapters, + such as LoRA, AdaLoRA, or other parameter-efficient fine-tuning methods. The adapter + weights are merged with the base model during loading for efficient inference. + + Attributes: + base_model (str): + HuggingFace Hub model ID or path to the base model. This is the original + pre-trained model that the adapter was trained on. + adapter_weights (bool): + Flag indicating that this is an adapter model. Must be set to True. + + Note: + - Requires the `peft` library to be installed, `pip install lighteval[adapters]` + - Adapter models have the specificity that they look at the base model (= the parent) for the tokenizer and config + """ + base_model: str adapter_weights: bool diff --git a/src/lighteval/models/transformers/delta_model.py b/src/lighteval/models/transformers/delta_model.py index 3638fe5af..bb24e86ff 100644 --- a/src/lighteval/models/transformers/delta_model.py +++ b/src/lighteval/models/transformers/delta_model.py @@ -36,6 +36,21 @@ class DeltaModelConfig(TransformersModelConfig): + """ + Configuration class for delta models (weight difference models). + + This configuration is used to load models that represent the difference between a + fine-tuned model and its base model. The delta weights are added to the base model + during loading to reconstruct the full fine-tuned model. + + Attributes: + base_model (str): + HuggingFace Hub model ID or path to the base model. This is the original + pre-trained model that the delta was computed from. + delta_weights (bool): + Flag indicating that this is a delta model. Must be set to True. 
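Unlike the other new config docstrings, these two do not carry an Example block; a construction sketch in the same spirit, with all repository ids as placeholders:

```python
from lighteval.models.transformers.adapter_model import AdapterModelConfig
from lighteval.models.transformers.delta_model import DeltaModelConfig

adapter_config = AdapterModelConfig(
    model_name="org/llama-3-lora-adapter",  # placeholder PEFT adapter repo
    base_model="meta-llama/Llama-3.1-8B",   # placeholder base model
    adapter_weights=True,
)

delta_config = DeltaModelConfig(
    model_name="org/llama-3-delta",         # placeholder delta-weights repo
    base_model="meta-llama/Llama-3.1-8B",   # placeholder base model
    delta_weights=True,
)
```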
+ """ + # Delta models look at the pretrained (= the delta weights) for the tokenizer and model config base_model: str delta_weights: bool diff --git a/src/lighteval/models/transformers/transformers_model.py b/src/lighteval/models/transformers/transformers_model.py index e24c39399..334221712 100644 --- a/src/lighteval/models/transformers/transformers_model.py +++ b/src/lighteval/models/transformers/transformers_model.py @@ -22,11 +22,14 @@ import logging import os +from datetime import timedelta from typing import Optional, Tuple, Union import torch import torch.nn.functional as F import transformers +from accelerate import Accelerator, InitProcessGroupKwargs +from accelerate.utils import calculate_maximum_sizes, convert_bytes, get_max_memory from pydantic import Field, PositiveInt from torch.nn.utils.rnn import pad_sequence from torch.utils.data import DataLoader @@ -41,40 +44,23 @@ from transformers.generation.utils import GenerateOutput from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES -from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset, LoglikelihoodSingleTokenDataset +from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo from lighteval.models.model_output import ( Batch, - GenerativeMultiturnResponse, - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, + ModelResponse, ) from lighteval.models.utils import ModelConfig, _get_dtype, _get_model_sha, _simplify_name -from lighteval.tasks.requests import ( - GreedyUntilMultiTurnRequest, - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, - Request, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc from lighteval.utils.imports import ( is_accelerate_available, ) from lighteval.utils.parallelism import find_executable_batch_size -from lighteval.utils.utils import as_list logger = logging.getLogger(__name__) - -if is_accelerate_available(): - from datetime import timedelta - - from accelerate import Accelerator, InitProcessGroupKwargs - from accelerate.utils import calculate_maximum_sizes, convert_bytes, get_max_memory - os.environ["TOKENIZERS_PARALLELISM"] = "false" STARTING_BATCH_SIZE = 512 @@ -82,54 +68,67 @@ class TransformersModelConfig(ModelConfig): """ - Base configuration class for models. + Configuration class for HuggingFace Transformers models. + + This configuration is used to load and configure models from the HuggingFace Transformers library. Attributes: model_name (str): - HuggingFace Hub model ID name or the path to a pre-trained - model to load. This is effectively the `pretrained_model_name_or_path` - argument of `from_pretrained` in the HuggingFace `transformers` API. - accelerator (Accelerator): accelerator to use for model training. - tokenizer (Optional[str]): HuggingFace Hub tokenizer ID that will be - used for tokenization. - multichoice_continuations_start_space (Optional[bool]): Whether to add a - space at the start of each continuation in multichoice generation. - For example, context: "What is the capital of France?" and choices: "Paris", "London". - Will be tokenized as: "What is the capital of France? Paris" and "What is the capital of France? London". - True adds a space, False strips a space, None does nothing - pairwise_tokenization (bool): Whether to tokenize the context and continuation as separately or together. 
- subfolder (Optional[str]): The subfolder within the model repository. - revision (str): The revision of the model. - batch_size (int): The batch size for model training. - max_gen_toks (Optional[int]): The maximum number of tokens to generate. - max_length (Optional[int]): The maximum length of the generated output. - add_special_tokens (bool, optional, defaults to True): Whether to add special tokens to the input sequences. - If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and - `False` for causal models. - model_parallel (bool, optional, defaults to None): - True/False: force to use or not the `accelerate` library to load a large - model across multiple devices. - Default: None which corresponds to comparing the number of processes with - the number of GPUs. If it's smaller => model-parallelism, else not. - dtype (Union[str, torch.dtype], optional, defaults to None):): - Converts the model weights to `dtype`, if specified. Strings get - converted to `torch.dtype` objects (e.g. `float16` -> `torch.float16`). - Use `dtype="auto"` to derive the type from the model's weights. - device (Union[int, str]): device to use for model training. - quantization_config (Optional[BitsAndBytesConfig]): quantization - configuration for the model, manually provided to load a normally floating point - model at a quantized precision. Needed for 4-bit and 8-bit precision. - trust_remote_code (bool): Whether to trust remote code during model - loading. - generation_parameters (GenerationParameters): Range of parameters which will affect the generation. - generation_config (GenerationConfig): GenerationConfig object (only passed during manual creation) - - Methods: - __post_init__(): Performs post-initialization checks on the configuration. - _init_configs(model_name, env_config): Initializes the model configuration. - init_configs(env_config): Initializes the model configuration using the environment configuration. - get_model_sha(): Retrieves the SHA of the model. + HuggingFace Hub model ID or path to a pre-trained model. This corresponds to the + `pretrained_model_name_or_path` argument in HuggingFace's `from_pretrained` method. + tokenizer (str | None): + Optional HuggingFace Hub tokenizer ID. If not specified, uses the same ID as model_name. + Useful when the tokenizer is different from the model (e.g., for multilingual models). + subfolder (str | None): + Subfolder within the model repository. Used when models are stored in subdirectories. + revision (str): + Git revision of the model to load. Defaults to "main". + batch_size (PositiveInt | None): + Batch size for model inference. If None, will be automatically determined. + max_length (PositiveInt | None): + Maximum sequence length for the model. If None, uses model's default. + model_loading_kwargs (dict): + Additional keyword arguments passed to `from_pretrained`. Defaults to empty dict. + add_special_tokens (bool): + Whether to add special tokens during tokenization. Defaults to True. + model_parallel (bool | None): + Whether to use model parallelism across multiple GPUs. If None, automatically + determined based on available GPUs and model size. + dtype (str | None): + Data type for model weights. Can be "float16", "bfloat16", "float32", "auto", "4bit", "8bit". + If "auto", uses the model's default dtype. + device (Union[int, str]): + Device to load the model on. Can be "cuda", "cpu", or GPU index. Defaults to "cuda". + trust_remote_code (bool): + Whether to trust remote code when loading models. Defaults to False. 
+ use_chat_template (bool): + Whether to use chat templates for conversation-style prompts. Defaults to False. + compile (bool): + Whether to compile the model using torch.compile for optimization. Defaults to False. + multichoice_continuations_start_space (bool | None): + Whether to add a space before multiple choice continuations. If None, uses model default. + True forces adding space, False removes leading space if present. + pairwise_tokenization (bool): + Whether to tokenize context and continuation separately or together. Defaults to False. + + Example: + ```python + config = TransformersModelConfig( + model_name="meta-llama/Llama-3.1-8B-Instruct", + batch_size=4, + dtype="float16", + use_chat_template=True, + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + Note: + This configuration supports quantization (4-bit and 8-bit) through the dtype parameter. + When using quantization, ensure you have the required dependencies installed + (bitsandbytes for 4-bit/8-bit quantization). """ model_name: str @@ -223,7 +222,11 @@ def __init__( model_name=self.config.model_name, model_sha=self.model_sha, model_dtype=config.dtype, - model_size=str(model_size), + model_size=model_size, + ) + + self.prompt_manager = PromptManager( + use_chat_template=self.use_chat_template, tokenizer=self.tokenizer, system_prompt=config.system_prompt ) def cleanup(self): @@ -240,7 +243,6 @@ def from_model( accelerator: "Accelerator" = None, tokenizer_name: str = None, # custom tokenizer trust_remote_code: bool = False, - use_chat_template: bool = False, add_special_tokens: bool = True, pairwise_tokenization: bool = False, multichoice_continuations_start_space: bool = None, @@ -275,7 +277,7 @@ def from_model( else: self._device = self.config.device - self.use_chat_template = use_chat_template + self.use_chat_template = config.use_chat_template if config else False self._add_special_tokens = add_special_tokens if add_special_tokens is not None else False self.pairwise_tokenization = pairwise_tokenization self.multichoice_continuations_start_space = multichoice_continuations_start_space @@ -290,8 +292,8 @@ def from_model( self.model_info = ModelInfo( model_name=self.model_name, model_sha=self.model_sha, - model_dtype=self.precision, - model_size=model_size, + model_dtype=str(self.precision), + model_size=int(model_size), ) return self @@ -308,7 +310,7 @@ def max_length(self) -> int: return self._max_length @property - def device(self) -> Union[int, str, torch.device]: + def device(self) -> Union[str, torch.device]: return self._device @property @@ -502,16 +504,10 @@ def forward_batch(batch_size): logger.info(f"Determined largest batch size: {batch_size}") return batch_size - def greedy_until_multi_turn( # noqa: C901 - self, - requests: list[GreedyUntilMultiTurnRequest], - ) -> GenerativeMultiturnResponse: - raise NotImplementedError("This method is not implemented for this model") - def greedy_until( self, - requests: list[GreedyUntilRequest], - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -522,13 +518,9 @@ def greedy_until( Returns: list[GenerativeResponse]: list of generated responses. 
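To make the new `Doc`-based signature concrete, a minimal call sketch; it assumes `model` is an already loaded lighteval transformers backend instance and uses placeholder task values:

```python
from lighteval.tasks.requests import Doc

docs = [
    Doc(task_name="demo", query="Question: 2 + 2 = ?\nAnswer:", choices=["4"], gold_index=0),
]
responses = model.greedy_until(docs)  # list[ModelResponse]
print(responses[0].text[0])           # generated continuation for the first doc
```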
""" - for request in requests: - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - request.tokenized_context = self.tok_encode(request.context) - - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) - starting_batch_size = STARTING_BATCH_SIZE results = [] + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) + starting_batch_size = STARTING_BATCH_SIZE for split in tqdm( dataset.splits_iterator(), @@ -541,8 +533,11 @@ def greedy_until( # No constraints on the generation size: max length allowed is the max model context max_context_continuation_size_allowed = self.max_length else: + context = self.prompt_manager.prepare_prompt(split[0]) + tokenized_context = self.tokenizer(context) + # Longest context in the current split is the first item (since we sort reversed) - longest_context_continuation_size_in_split = len(split[0].tokenized_context) + split[0].generation_size + longest_context_continuation_size_in_split = len(tokenized_context) + split[0].generation_size max_context_continuation_size_allowed = min( longest_context_continuation_size_in_split, self.max_length ) @@ -561,6 +556,8 @@ def greedy_until( for batch in tqdm( dataloader, desc="Greedy generation", position=1, leave=False, disable=self.disable_tqdm ): + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in batch] + # For chat models, generation stops with EOS token, so we don't need to specify stop tokens if self.use_chat_template: stop_tokens = [] @@ -568,19 +565,15 @@ def greedy_until( # NOTE: we are assuming all items in a batch behave similarly (same # stop_tokens and max_tokens genrated) which is not necessarily # the case! Because of that we only use batch size of 1 - stop_tokens = batch[0].stop_sequence + stop_tokens = batch[0].stop_sequences max_new_tokens = batch[0].generation_size - returns_logits = batch[0].use_logits num_samples = batch[0].num_samples - do_sample = batch[0].do_sample - - context = [c.context for c in batch] # See doc https://huggingface.co/docs/transformers/v4.38.2/en/pad_truncation#padding-and-truncation # Will do left truncation and padding, as defined when creating the tokenizer tokenized = self.tokenizer( - context, + contexts, truncation="longest_first", # we truncate to the model max length if needed padding="longest", # we pad to the longest sequence return_tensors="pt", @@ -614,7 +607,7 @@ def greedy_until( input_ids=tokenized["input_ids"], input_lengths=[len(item == 1) for item in tokenized["attention_mask"]], input_mask=tokenized["attention_mask"], - truncated=[max(len(c) - tokenized["input_ids"].shape[1], 0) for c in context], + truncated=[max(len(c) - tokenized["input_ids"].shape[1], 0) for c in contexts], padded=[sum(mask == 0) for mask in tokenized["attention_mask"]], ) @@ -622,9 +615,8 @@ def greedy_until( batch=prepared_batch, max_new_tokens=max_new_tokens, stop_tokens=stop_tokens, - returns_logits=returns_logits, + returns_logits=False, num_samples=num_samples, - do_sample=do_sample, ) results.extend(cur_reponses) @@ -637,20 +629,23 @@ def _generate( stop_tokens: list[str], returns_logits: Optional[bool] = False, num_samples: int = 1, - do_sample: Optional[bool] = False, - ) -> list[GenerativeResponse]: + ) -> list[ModelResponse]: """Contains the actual logic of the generation. First computes the stop sequences, then generates the predictions, then converts the outputs to GenerativeResponse. 
""" stopping_criteria = stop_sequences_criteria(self.tokenizer, stop_sequences=stop_tokens, batch=batch) batch_size, _ = batch.input_ids.shape + if num_samples > 1 and self.generation_config_dict["temperature"] == 0: + raise ValueError( + "You cannot generate multiple samples with temperature=0. Please set temperature > 0. Or use a non sampling metric." + ) + generation_config = self.generation_config_dict.copy() generation_config.update( max_new_tokens=max_new_tokens, pad_token_id=self.tokenizer.pad_token_id if self.tokenizer.pad_token_id else self.tokenizer.eos_token_id, eos_token_id=self.tokenizer.eos_token_id, - do_sample=do_sample, num_return_sequences=num_samples, output_logits=returns_logits, renormalize_logits=True, @@ -701,11 +696,11 @@ def _generate( decoded_generations.append(decoded_generation) - cur_response = GenerativeResponse( - result=decoded_generations, + cur_response = ModelResponse( + text=decoded_generations, + output_tokens=result_generations, logits=logits[ix][: len_logits[ix]] if returns_logits else None, - generated_tokens=result_generations, - input_tokens=batched_input[: len_ids[ix]], + input_tokens=batched_input[: len_ids[ix]].tolist(), truncated_tokens_count=trunc.cpu().item(), padded_tokens_count=padded.cpu().item(), ) @@ -715,8 +710,8 @@ def _generate( def loglikelihood( self, - requests: list[LoglikelihoodRequest], - ) -> list[LoglikelihoodResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """Tokenize the context and continuation and compute the log likelihood of those tokenized sequences. @@ -726,52 +721,40 @@ def loglikelihood( Returns: list[Tuple[float, bool]]: _description_ """ - for request in requests: - if request.context == "": - request.tokenized_context = [self.tokenizer.eos_token_id] - request.tokenized_continuation = self.tok_encode(request.choice) - else: - # The following line is mandatory for compatibility with the harness - request.tokenized_context, request.tokenized_continuation = self.tok_encode_pair( - request.context, request.choice, pairwise=self.pairwise_tokenization - ) - - return self._loglikelihood_tokens(requests) + return self._loglikelihood_tokens(docs) def loglikelihood_rolling( self, - requests: list[LoglikelihoodRollingRequest], - ) -> list[LoglikelihoodResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """This function is used to compute the log likelihood of the context for perplexity metrics.""" - for request in requests: # tuple of one elem - request.tokenized_context = [self.tokenizer.eos_token_id] # Fake context - request.tokenized_continuation = self.tok_encode(request.context) - - results = self._loglikelihood_tokens( - requests, - return_bool_score=False, + return self._loglikelihood_tokens( + docs, rolling=True, ) - return results - def _loglikelihood_tokens( + def _loglikelihood_tokens( # noqa: C901 self, - requests: list[LoglikelihoodRequest], - return_bool_score: bool = True, - rolling: bool = False, - ) -> list[LoglikelihoodResponse]: - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + docs: list[Doc], + rolling=False, + ) -> list[ModelResponse]: + dataset = LoglikelihoodDataset(requests=docs, num_dataset_splits=1) starting_batch_size = STARTING_BATCH_SIZE - res = [] + all_responses = [] for split in tqdm(dataset.splits_iterator(), disable=self.disable_tqdm): - context_enc = split[0].tokenized_context - continuation_enc = split[0].tokenized_continuation + first_doc_context = self.prompt_manager.prepare_prompt(split[0]) + tokenized_contexts, 
tokenized_continuations = self.tok_encode_pair( + first_doc_context, split[0].choices, pairwise=self.pairwise_tokenization + ) + if rolling: # we take all the sequence in rolling mode - max_input_length = len(context_enc + continuation_enc) + max_input_length = len(tokenized_contexts[0] + tokenized_continuations[0]) else: # in normal mode, we left cut the context if needed - max_input_length = max(min(self.max_length, len(context_enc + continuation_enc) - 1), 1) + max_input_length = max( + min(self.max_length, len(tokenized_contexts[0] + tokenized_continuations[0]) - 1), 1 + ) batch_size = self._get_batch_size( override_bs=self.config.batch_size, @@ -779,123 +762,197 @@ def _loglikelihood_tokens( starting_batch_size=starting_batch_size, ) starting_batch_size = batch_size * 2 + logger.warning( + f"batch size is set to {batch_size}; however, loglikelihood evaluates n choices per sample, so the effective batch size will be multiplied by the number of choices per sample" + ) dataloader = DataLoader(split, batch_size=batch_size, collate_fn=lambda batch: batch) if self.accelerator: dataloader = self.accelerator.prepare(dataloader) for batch in tqdm(dataloader, disable=self.disable_tqdm): + batch_contexts: list[str] = [self.prompt_manager.prepare_prompt(doc) for doc in batch] + batch_tokenized_contexts = [] + batch_tokenized_continuations = [] + + for context, doc in zip(batch_contexts, batch): + doc_contexts, doc_continuations = self.tok_encode_pair(context, doc.choices, pairwise=True) + batch_tokenized_contexts.append(doc_contexts) + batch_tokenized_continuations.append(doc_continuations) + prepared_batch = self.prepare_batch_logprob( - batch, - padding_length=max_input_length, - max_context=max_input_length, + tokenized_contexts=batch_tokenized_contexts, + tokenized_continuations=batch_tokenized_continuations, + max_context=None, # computed as model max length in the function ) model_output = self._model_call(prepared_batch.input_ids) - logits = F.log_softmax(model_output, dim=-1) # [batch, padding_length, vocab] - - logits_sum = [] - max_equals = [] - batch_cont_tokens = [] - for cur_request, cur_logits, inplen in zip(batch, logits, prepared_batch.input_lengths): - cont_toks = torch.tensor(cur_request.tokenized_continuation, dtype=torch.long, device=self.device) - contlen = cont_toks.shape[0] - # We only look at the continuation tokens - if contlen > inplen: - # Continuation is longer than the input size, we are in rolling mode (only continuation) - cur_logits = cur_logits.unsqueeze(0).to(self.device) # [1, seq, vocab] - cont_toks = cont_toks[:inplen].unsqueeze(0).to(self.device) # [1, seq] - else: - cur_logits = ( - cur_logits[inplen - contlen : inplen].unsqueeze(0).to(self.device) - ) # [1, seq, voc] - cont_toks = cont_toks.unsqueeze(0).to(self.device) # [1, seq] - - # Check if per-token argmax is exactly equal to continuation - greedy_tokens = cur_logits.argmax(dim=-1).to(self.device) - # Sometimes the continuation is longer than allowed by the model, we only look at the first tokens - max_equal = (greedy_tokens == cont_toks).all().squeeze(0).to(self.device) - - # Obtain log-probs at the corresponding continuation token indices - cur_logits = torch.gather(cur_logits, 2, cont_toks.unsqueeze(-1)).squeeze(-1) # [1, seq] - - # Answer: (log prob, is-exact-match) - logits_sum.append(cur_logits.sum()) - max_equals.append(max_equal) - batch_cont_tokens.append(cont_toks) - - # Sync all - # Need reshaping before gather - batched_inputs, len_inputs = self.pad_and_gather(prepared_batch.input_ids) -
max_cont_tokens_length = max(len(c[0]) for c in batch_cont_tokens) - # These are the true lengths of the continuation tokens, we have to save them to be able to removed padding tokens from the generated tokens. - batch_cont_token_lengths = torch.tensor([c.shape[1] for c in batch_cont_tokens], device=self.device) - batch_cont_tokens = torch.cat( - [ - F.pad(c, (0, max_cont_tokens_length - c.shape[1], 0, 0), value=self.tokenizer.pad_token_id) - for c in batch_cont_tokens - ], - dim=0, - ) - batch_cont_tokens, _ = self.pad_and_gather(batch_cont_tokens) - # Can be gathered as such - logits = torch.tensor(logits_sum, device=self.device) - max_equal = torch.tensor(max_equals, device=self.device) - batch_truncated = torch.tensor(prepared_batch.truncated, device=self.device) - batch_padded = torch.tensor(prepared_batch.padded, device=self.device) - if self.accelerator: - logits = self.accelerator.gather_for_metrics(logits) - max_equal = self.accelerator.gather_for_metrics(max_equal) - batch_truncated = self.accelerator.gather_for_metrics(batch_truncated) - batch_padded = self.accelerator.gather_for_metrics(batch_padded) - batch_cont_token_lengths = self.accelerator.gather_for_metrics(batch_cont_token_lengths) - - for logit, cont_tokens, maxe, batched_input, trunc, padded, len_input, len_token in zip( - logits, - batch_cont_tokens, - max_equal, - batched_inputs, - batch_truncated, - batch_padded, - len_inputs, - batch_cont_token_lengths, - ): - # Filter out padding tokens from input_tokens and generated_tokens - input_tokens = batched_input[:len_input].cpu().tolist() - generated_tokens = cont_tokens[:len_token].cpu().tolist() - - answer = LoglikelihoodResponse( - # todo: we might want to store the logits unsummed - result=(float(logit.sum()), bool(maxe)) if return_bool_score else float(logit.sum()), - input_tokens=input_tokens, - generated_tokens=generated_tokens, - truncated_tokens_count=trunc.cpu().item(), - padded_tokens_count=padded.cpu().item(), + logits = F.log_softmax(model_output, dim=-1) # [batch, sequence_length, vocab] + + flat_index = 0 + batch_logits_sums = [] + batch_max_equals = [] + batch_tokenized_contexts_processed = [] + batch_tokenized_continuations_processed = [] + batch_choices_counts = [] + + # Flatten the logits to match the number of choices per sample + for doc_idx, doc in enumerate(batch): + # Get the size of the corresponding nested list + num_choices = len(batch_tokenized_continuations[doc_idx]) + doc_continuations = batch_tokenized_continuations[doc_idx] + # Extract the corresponding elements from flat_values + doc_logits = logits[flat_index : flat_index + num_choices] + doc_input_lengths = prepared_batch.input_lengths[flat_index : flat_index + num_choices] + # Move the index forward + flat_index += num_choices + + doc_logits_sums = [] + doc_max_equals = [] + doc_continuation_tokens = [] + + for choice_logits, input_length, choice_continuation in zip( + doc_logits, doc_input_lengths, doc_continuations + ): + choice_continuation_tensor = torch.tensor( + choice_continuation, dtype=torch.long, device=self.device + ) + continuation_length = len(choice_continuation_tensor) + if rolling: + choice_logits = choice_logits.unsqueeze(0).to(self.device) # [1, seq, vocab] + choice_continuation_tensor = ( + choice_continuation_tensor[:input_length].unsqueeze(0).to(self.device) + ) # [1, seq] + else: + choice_logits = ( + choice_logits[input_length - continuation_length - 1 : input_length - 1] + .unsqueeze(0) + .to(self.device) + ) + choice_continuation_tensor = 
choice_continuation_tensor.unsqueeze(0).to( + self.device + ) # [1, seq] + + # Check if per-token argmax is exactly equal to continuation + greedy_tokens = choice_logits.argmax(dim=-1).to(self.device) + # Sometimes the continuation is longer than allowed by the model, we only look at the first tokens + is_exact_match = (greedy_tokens == choice_continuation_tensor).all().squeeze(0).to(self.device) + + # Obtain log-probs at the corresponding continuation token indices + choice_logits = torch.gather( + choice_logits, 2, choice_continuation_tensor.unsqueeze(-1) + ).squeeze(-1) # [1, seq] + + # Answer: (log prob, is-exact-match) + doc_logits_sums.append(choice_logits.sum()) + doc_max_equals.append(is_exact_match) + doc_continuation_tokens.append(choice_continuation_tensor.squeeze(0)) # [seq] + + batch_logits_sums.append(torch.stack(doc_logits_sums)) + batch_max_equals.append(torch.stack(doc_max_equals)) + batch_tokenized_continuations_processed.append( + pad_sequence(doc_continuation_tokens, batch_first=True, padding_value=-1) + ) + batch_tokenized_contexts_processed.append( + torch.tensor(batch_tokenized_contexts[doc_idx][0], dtype=torch.long, device=self.device) + ) + batch_choices_counts.append( + torch.tensor(len(batch_tokenized_continuations[doc_idx]), dtype=torch.long, device=self.device) ) - res.append(answer) - # Clean up GPUs - del model_output - del logits - del batched_inputs - del batch_truncated - del batch_padded + # Gather the results from all processes + # need to gather logits_sum_batch, max_equals_batch and batch_cont_tokens_batch, contexts + if self.accelerator: + # Convert lists to tensors for proper gathering + # Pad and stack the tensors to make them gatherable + choices_lengths = [len(choices) for choices in batch_tokenized_continuations_processed] + choices_lengths_tensor = torch.tensor(choices_lengths, device=self.device) + gathered_choices_lengths = self.accelerator.gather_for_metrics(choices_lengths_tensor) + global_max_choices = gathered_choices_lengths.max().item() + + # Pad logits_sum_batch to same size + padded_logits_sums = [] + for logits_sum_doc in batch_logits_sums: + pad_amount = global_max_choices - len(logits_sum_doc) + padded = F.pad(logits_sum_doc, (0, pad_amount), value=-1) + padded_logits_sums.append(padded) + + padded_max_equals = [] + for max_equals_doc in batch_max_equals: + pad_amount = global_max_choices - len(max_equals_doc) + padded = F.pad(max_equals_doc, (0, pad_amount), value=False) + padded_max_equals.append(padded) + + padded_continuations = [] + for cont_batch in batch_tokenized_continuations_processed: + pad_amount = global_max_choices - cont_batch.shape[0] + padded = F.pad(cont_batch, (0, pad_amount), value=-1) + padded_continuations.append(padded) + + padded_contexts = [] + for ctx_batch in batch_tokenized_contexts_processed: + pad_amount = global_max_choices - ctx_batch.shape[0] + padded = F.pad(ctx_batch, (0, pad_amount), value=-1) + padded_contexts.append(padded) + + # Stack all tensors for gathering + stacked_logits_sums = torch.stack(padded_logits_sums) + stacked_max_equals = torch.stack(padded_max_equals) + stacked_continuations = torch.stack(padded_continuations) + stacked_contexts = torch.stack(padded_contexts) + + # Gather the stacked tensors + gathered_logits_sums = self.accelerator.gather_for_metrics(stacked_logits_sums) + gathered_max_equals = self.accelerator.gather_for_metrics(stacked_max_equals) + gathered_continuations = self.accelerator.gather_for_metrics(stacked_continuations) + gathered_contexts = 
self.accelerator.gather_for_metrics(stacked_contexts) + + # Convert back to lists for processing + batch_logits_sums = [] + batch_max_equals = [] + batch_tokenized_continuations_processed = [] + batch_tokenized_contexts_processed = [] + + # Only process if we have gathered results + for i, actual_count in enumerate(gathered_choices_lengths): + # Extract non-padded values based on actual counts + batch_logits_sums.append(gathered_logits_sums[i][:actual_count]) + batch_max_equals.append(gathered_max_equals[i][:actual_count]) + batch_tokenized_continuations_processed.append(gathered_continuations[i][:actual_count]) + batch_tokenized_contexts_processed.append(gathered_contexts[i][:actual_count]) + + # Process the gathered results + for i in range(len(batch_logits_sums)): + max_equals_doc = batch_max_equals[i] + logits_sum_doc = batch_logits_sums[i] + tokenized_contexts_batch = batch_tokenized_contexts_processed[i] + tokenized_continuations_batch = batch_tokenized_continuations_processed[i] + answer = ModelResponse( + argmax_logits_eq_gold=[max_equal.cpu().item() for max_equal in max_equals_doc], + logprobs=[sum.cpu().item() for sum in logits_sum_doc], + input_tokens=tokenized_contexts_batch, + output_tokens=tokenized_continuations_batch, + ) + all_responses.append(answer) - return dataset.get_original_order(res) + return dataset.get_original_order(all_responses) def prepare_batch_logprob( - self, batch: list[Request], padding_length: int, max_context: Optional[int] = None, single_token: bool = False + self, + tokenized_contexts: list[list[list[int]]], + tokenized_continuations: list[list[list[int]]], + max_context: Optional[int] = None, ): """Tokenize a batch of inputs and return also the length, truncations and padding. This step is done manually since we tokenize log probability inputs together with their continuation, to manage possible extra spaces added at the start by tokenizers, see tok_encode_pair. 
""" - if single_token: - inputs = [request.tokenized_context for request in batch] - else: - inputs = [ - request.tokenized_context + request.tokenized_continuation[:-1] for request in batch - ] # The last token (an eos) doesn't need to be given to the model + inputs = [] + # we used to remove the last token of continuation, but it's not needed + for tokenized_context, tokenized_continuation in zip(tokenized_contexts, tokenized_continuations): + inputs.extend( + [context + continuation for context, continuation in zip(tokenized_context, tokenized_continuation)] + ) input_tokens = [] attention_masks = [] @@ -904,37 +961,39 @@ def prepare_batch_logprob( padded = [] if max_context is None: - logger.warning("max_context is None, using max_length") max_context = self.max_length + # First, find the longest sequence length in the batch + max_sequence_length = max(len(seq) for seq in inputs) + # If max_sequence_length is longer than max_context, we need to truncate + effective_max_length = min(max_sequence_length, max_context) + # Each sample is concatenated and cut to length or padded to max_length for orig_tokens in inputs: - truncated.append(max(len(orig_tokens) - max_context, 0)) + # Calculate truncation + truncated.append(max(len(orig_tokens) - effective_max_length, 0)) # Truncate from the left if needed to fit in the model's context - tokens = torch.tensor((orig_tokens)[-max_context:], dtype=torch.long).to(self.device) + tokens = torch.tensor((orig_tokens)[-effective_max_length:], dtype=torch.long).to(self.device) sequence_len = tokens.shape[0] - # We add padding, if needed - padding_length = padding_length if padding_length is not None else sequence_len + # Calculate padding needed to reach effective_max_length + padding_needed = effective_max_length - sequence_len + padded.append(padding_needed) - if padding_length - sequence_len < 0: - logger.warning(f"Padding length {padding_length} is smaller than input length {sequence_len}") - raise ValueError("Negative padding") + # Right padding to reach effective_max_length + tokens = F.pad(tokens, (0, padding_needed), value=self.tokenizer.pad_token_id) - padded.append(padding_length - sequence_len) - # Right padding, since we ignore these logprobs in the end - tokens = F.pad(tokens, (0, padding_length - sequence_len), value=self.tokenizer.pad_token_id) - - # We create the attention mask to ignore padding - mask = tokens == self.tokenizer.pad_token_id + # Create attention mask (1 for real tokens, 0 for padding) + mask = torch.ones_like(tokens, dtype=torch.bool) + mask[sequence_len:] = False attention_masks.append(mask) - input_tokens.append(tokens.unsqueeze(0)) # [1, padding_length] + input_tokens.append(tokens.unsqueeze(0)) # [1, effective_max_length] input_lengths.append(sequence_len) - batched_inputs = torch.cat(input_tokens, dim=0) # [batch, padding_length] - attention_masks = torch.cat(attention_masks, dim=0) + batched_inputs = torch.cat(input_tokens, dim=0) # [batch, effective_max_length] + attention_masks = torch.cat(attention_masks, dim=0) # [batch, effective_max_length] return Batch( input_ids=batched_inputs, @@ -980,122 +1039,6 @@ def pad_and_gather( output_tensor = self.accelerator.gather(output_tensor) return output_tensor, length_tensor - def loglikelihood_single_token( - self, - requests: list[LoglikelihoodSingleTokenRequest], - ) -> list[LoglikelihoodSingleTokenResponse]: - """Tokenize the context and continuation and compute the log likelihood of those - tokenized sequences. 
- - Args: - requests (list[Tuple[str, dict]]): _description_ - - Returns: - list[Tuple[float, bool]]: _description_ - """ - for request in requests: - if request.context == "": - request.tokenized_context = [self.tokenizer.eos_token_id] - else: - request.tokenized_context = self.tok_encode(request.context) - - # Some models tokenizer want a space at the beginning and other not - continuations = [self._check_continuations_start_space(c) for c in request.choices] - - # We must not accidentally prepend a continuation with a start of sentence token. - continuations_enc = [self.tok_encode(c, add_special_tokens=False) for c in continuations] - if any(len(c) > 1 for c in continuations_enc): - raise ValueError( - f"Trying to do single token multiple choice but one choice has several tokens: {continuations_enc}. " - "If the additional pre-token is a space, try to set `multichoice_continuations_start_space=False` in the model parameters " - ) - request.tokenized_continuation = continuations_enc - - return self._loglikelihood_single_token(requests) - - def _loglikelihood_single_token( - self, - requests: list[LoglikelihoodSingleTokenRequest], - ) -> list[LoglikelihoodSingleTokenResponse]: - dataset = LoglikelihoodSingleTokenDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) - starting_batch_size = STARTING_BATCH_SIZE - res = [] - - for split in tqdm(dataset.splits_iterator(), disable=self.disable_tqdm): - context_enc = split[0].tokenized_context - max_context = len(context_enc[-self.max_length :]) - batch_size = self._get_batch_size(override_bs=self.config.batch_size, max_input_length=max_context) - starting_batch_size = batch_size * 2 - - dataloader = DataLoader(split, batch_size=starting_batch_size, collate_fn=lambda batch: batch) - if self.accelerator is not None: - dataloader = self.accelerator.prepare(dataloader) - - for batch in tqdm(dataloader, disable=self.disable_tqdm, position=1): - prepared_batch = self.prepare_batch_logprob( - batch, padding_length=max_context, max_context=max_context, single_token=True - ) - - out = self._model_call(prepared_batch.input_ids) # [batch, padding_length, vocab] - out = F.log_softmax(out, dim=-1) # we do a softmax over the options, no the vocab - - batch_probs = [] - batch_cont_tokens = [] - for cur_request, logits, inplen in zip(batch, out, prepared_batch.input_lengths): - # Get the last token - logits = logits[inplen - 1] # [vocab] - - cont_toks = torch.tensor( - cur_request.tokenized_continuation, dtype=torch.long, device=self.device - ).squeeze(-1) # [num_choices] - - # Obtain log-probs at the corresponding continuation token indices - # last_token_slice = logits[:, -1, :].squeeze(0).tolist() - probs = torch.gather(logits, dim=0, index=cont_toks) # [num_choices] - - # Answer: (log prob, is-exact-match) - # probs = torch.nn.functional.softmax(logits.float(), dim=0) # [num_choices] - batch_probs.append(probs) - batch_cont_tokens.append(cont_toks) - - # Sync all - # Need reshape before gather - batched_inputs, len_inputs = self.pad_and_gather(prepared_batch.input_ids) - # We sometimes have different tasks with a different number of choices. 
- # Padding to -10000 makes sure that we won't reach index problems later as all log probs will be smaller than that - batch_probs = pad_sequence(batch_probs, batch_first=True, padding_value=-10000000) - batch_probs, len_probs = self.pad_and_gather(batch_probs) - batch_cont_tokens = pad_sequence(batch_cont_tokens, batch_first=True, padding_value=-10000000) - batch_cont_tokens, len_cont = self.pad_and_gather(batch_cont_tokens) - - # No reshape - batch_truncated = torch.tensor(prepared_batch.truncated, device=self.device) - batch_padded = torch.tensor(prepared_batch.padded, device=self.device) - if self.accelerator: - batch_truncated = self.accelerator.gather_for_metrics(batch_truncated) - batch_padded = self.accelerator.gather_for_metrics(batch_padded) - - for ix, (probs, cont_tokens, batched_input, trunc, padded) in enumerate( - zip(batch_probs, batch_cont_tokens, batched_inputs, batch_truncated, batch_padded) - ): - answer = LoglikelihoodSingleTokenResponse( - result=probs[: len_probs[ix]].detach().cpu().tolist(), - input_tokens=batched_input[: len_inputs[ix]].cpu().tolist(), - generated_tokens=cont_tokens[: len_cont[ix]].cpu().tolist(), - truncated_tokens_count=trunc.cpu().item(), - padded_tokens_count=padded.cpu().item(), - ) - res.append(answer) - - # Clean up GPUs - del out - del batch_probs - del batched_inputs - del batch_truncated - del batch_padded - - return dataset.get_original_order(res) - class MultiTokenEOSCriteria(transformers.StoppingCriteria): """Criteria to stop on the specified multi-token sequence.""" diff --git a/src/lighteval/models/transformers/vlm_transformers_model.py b/src/lighteval/models/transformers/vlm_transformers_model.py index 611073e88..ad9929258 100644 --- a/src/lighteval/models/transformers/vlm_transformers_model.py +++ b/src/lighteval/models/transformers/vlm_transformers_model.py @@ -22,9 +22,12 @@ import logging import os +from datetime import timedelta from typing import Optional, Tuple, Union import torch +from accelerate import Accelerator, InitProcessGroupKwargs +from accelerate.utils import gather_object, get_max_memory from pydantic import PositiveInt from torch.utils.data import DataLoader from tqdm import tqdm @@ -32,56 +35,42 @@ AutoConfig, AutoModelForImageTextToText, AutoProcessor, - BitsAndBytesConfig, - PretrainedConfig, ) +from transformers.configuration_utils import PretrainedConfig +from transformers.utils.quantization_config import BitsAndBytesConfig from lighteval.data import GenerativeTaskDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig, _get_dtype, _get_model_sha, _simplify_name -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodSingleTokenRequest, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc from lighteval.utils.imports import ( is_accelerate_available, ) -from lighteval.utils.utils import as_list logger = logging.getLogger(__name__) -if is_accelerate_available(): - from datetime import timedelta - - from accelerate import Accelerator, InitProcessGroupKwargs - from accelerate.utils import gather_object, get_max_memory - - class BatchCollator: """Collator for batching requests""" - def __init__(self, processor, **kwargs): + def __init__(self, prompt_manager, 
processor, **kwargs): self.processor = processor + self.prompt_manager = prompt_manager self.kwargs = kwargs - def __call__(self, requests: list[GreedyUntilRequest]) -> Tuple[dict[str, torch.Tensor], list[GreedyUntilRequest]]: - texts = [request.context for request in requests] + def __call__(self, requests: list[Doc]) -> Tuple[dict[str, torch.Tensor], list[Doc], list[str]]: + texts = [self.prompt_manager.prepare_prompt_multimodal(request) for request in requests] images = [request.images for request in requests] inputs = self.processor(text=texts, images=images, **self.kwargs) - return inputs, requests + return inputs, requests, texts class VLMTransformersModelConfig(ModelConfig): """ - Base configuration class for models. + Configuration class for VLM (image-text-to-text) models. Attributes: model_name (str): @@ -96,7 +85,7 @@ class VLMTransformersModelConfig(ModelConfig): revision (str): The revision of the model. batch_size (int): The batch size for model training. generation_size (Optional[int]): The maximum number of tokens to generate. - max_length (Optional[int]): The maximum length of the generated output. + max_length (Optional[int]): The maximum length of the input + generated output. add_special_tokens (bool, optional, defaults to True): Whether to add special tokens to the input sequences. model_parallel (bool, optional, defaults to None): True/False: force to use or not the `accelerate` library to load a large @@ -113,15 +102,6 @@ class VLMTransformersModelConfig(ModelConfig): model at a quantized precision. Needed for 4-bit and 8-bit precision. trust_remote_code (bool): Whether to trust remote code during model loading. - generation_parameters (GenerationParameters): Range of parameters which will affect the generation. - generation_config (GenerationConfig): GenerationConfig object (only passed during manual creation) - - Methods: - __post_init__(): Performs post-initialization checks on the configuration. - _init_configs(model_name, env_config): Initializes the model configuration. - init_configs(env_config): Initializes the model configuration using the environment configuration. - get_model_sha(): Retrieves the SHA of the model. - """ model_name: str @@ -187,6 +167,10 @@ def __init__( self.generation_config_dict["eos_token_id"] = self.eos_token_id self.generation_config_dict["renormalize_logits"] = True + self.prompt_manager = PromptManager( + use_chat_template=True, tokenizer=self.tokenizer, system_prompt=config.system_prompt + ) + self.model_info = ModelInfo( model_name=self.config.model_name, model_sha=self.model_sha, @@ -352,17 +336,10 @@ def _init_max_length(self) -> int: return 2048 - def _tokenize_requests_context_inplace(self, requests: list[GreedyUntilRequest]): - """Preprocess requests to fill in the tokenized_context field for sorting in the dataset""" - for request in requests: - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - inputs = self.processor(text=request.context, images=request.images) - request.tokenized_context = inputs["input_ids"][0] - def greedy_until( self, - requests: list[GreedyUntilRequest], - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. 
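Editor's note: the generation hunks below (and the vLLM `_generate` change further down in this diff) introduce the same sampling guard: requesting more than one returned sequence only makes sense with a non-zero temperature, otherwise the model raises instead of silently returning identical samples. A minimal standalone sketch of that logic, with a hypothetical function name rather than the actual lighteval code:

```python
def resolve_do_sample(num_samples: int, temperature: float) -> bool:
    """Return True when sampling is needed; reject impossible combinations."""
    if num_samples > 1 and temperature == 0:
        raise ValueError(
            "num_samples > 1 is not supported with temperature=0; "
            "set temperature > 0 or use non-sampling metrics."
        )
    return num_samples > 1 or temperature > 0


print(resolve_do_sample(1, 0.0))  # False -> greedy decoding
print(resolve_do_sample(4, 0.7))  # True  -> sampled generation
```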
@@ -375,12 +352,10 @@ def greedy_until( """ # Tokenizing context for sorting in the dataset - logger.info("Tokenizing requests context for sorting in the dataset") - self._tokenize_requests_context_inplace(requests) - logger.info("Done tokenizing!") - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) collator = BatchCollator( + self.prompt_manager, self.processor, truncation="longest_first", # we truncate to the model max length if needed padding="longest", # we pad to the longest sequence @@ -396,7 +371,7 @@ def greedy_until( if self.accelerator: dataloader = self.accelerator.prepare(dataloader) - for batch_inputs, batch_requests in tqdm( + for batch_inputs, batch_requests, input_context in tqdm( dataloader, desc="Greedy generation", position=1, leave=True, disable=self.disable_tqdm ): batch_inputs = batch_inputs.to(self.device) @@ -404,13 +379,21 @@ def greedy_until( batch_inputs = batch_inputs.to(self.torch_dtype) max_new_tokens = self.config.generation_size or batch_requests[0].generation_size + num_samples = batch_requests[0].num_samples + do_sample = num_samples > 1 or self.generation_config_dict["temperature"] > 0 + + if num_samples > 1 and self.generation_config_dict["temperature"] == 0: + raise ValueError( + "num_samples > 1 is not supported with temperature=0, please set temperature > 0 or use non sampling metrics." + ) + outputs = self.model.generate( **batch_inputs, **self.generation_config_dict, # custom generation params max_new_tokens=max_new_tokens, - do_sample=batch_requests[0].do_sample, num_return_sequences=batch_requests[0].num_samples, output_logits=batch_requests[0].use_logits, + do_sample=do_sample, ) input_tokens = batch_inputs.input_ids generated_tokens = outputs.sequences[:, input_tokens.shape[1] :] @@ -420,9 +403,10 @@ def greedy_until( batch_results = [] for i in range(len(generated_texts)): - generated_response = GenerativeResponse( - result=generated_texts[i], - generated_tokens=generated_tokens[i].cpu().numpy(), + generated_response = ModelResponse( + input=input_context[i], + text=generated_texts[i], + output_tokens=generated_tokens[i].cpu().numpy(), input_tokens=input_tokens[i].cpu().numpy(), truncated_tokens_count=-1, padded_tokens_count=padded_tokens_count[i].item(), @@ -439,17 +423,12 @@ def greedy_until( def loglikelihood( self, - requests: list[LoglikelihoodRequest], - ) -> list[LoglikelihoodResponse]: - raise NotImplementedError() - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest] - ) -> list[LoglikelihoodSingleTokenResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: raise NotImplementedError() def loglikelihood_rolling( self, - requests: list[LoglikelihoodRequest], - ) -> list[LoglikelihoodResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: raise NotImplementedError() diff --git a/src/lighteval/models/utils.py b/src/lighteval/models/utils.py index 2398bfa02..48f20ac6d 100644 --- a/src/lighteval/models/utils.py +++ b/src/lighteval/models/utils.py @@ -30,13 +30,54 @@ import yaml from huggingface_hub import HfApi from pydantic import BaseModel -from transformers import AutoConfig +from transformers.models.auto.configuration_auto import AutoConfig from lighteval.models.model_input import GenerationParameters class ModelConfig(BaseModel, extra="forbid"): + """ + Base configuration class for all model types in Lighteval. 
+ + This is the foundation class that all specific model configurations inherit from. + It provides common functionality for parsing configuration from files and command-line arguments, + as well as shared attributes that are used by all models like generation parameters and system prompts. + + Attributes: + generation_parameters (GenerationParameters): + Configuration parameters that control text generation behavior, including + temperature, top_p, max_new_tokens, etc. Defaults to empty GenerationParameters. + system_prompt (str | None): + Optional system prompt to be used with chat models. This prompt sets the + behavior and context for the model during evaluation. + + Methods: + from_path(path: str): + Load configuration from a YAML file. + from_args(args: str): + Parse configuration from a command-line argument string. + _parse_args(args: str): + Static method to parse argument strings into configuration dictionaries. + + Example: + ```python + # Load from YAML file + config = ModelConfig.from_path("model_config.yaml") + + # Load from command line arguments + config = ModelConfig.from_args("model_name=meta-llama/Llama-3.1-8B-Instruct,system_prompt='You are a helpful assistant.',generation_parameters={temperature=0.7}") + + # Direct instantiation + config = ModelConfig( + model_name="meta-llama/Llama-3.1-8B-Instruct", + generation_parameters=GenerationParameters(temperature=0.7), + system_prompt="You are a helpful assistant." + ) + ``` + """ + generation_parameters: GenerationParameters = GenerationParameters() + system_prompt: str | None = None @classmethod def from_path(cls, path: str): diff --git a/src/lighteval/models/vllm/vllm_model.py b/src/lighteval/models/vllm/vllm_model.py index d1bdf7328..b1de9d0a7 100644 --- a/src/lighteval/models/vllm/vllm_model.py +++ b/src/lighteval/models/vllm/vllm_model.py @@ -33,17 +33,11 @@ from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.models.utils import ModelConfig, _simplify_name -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, -) +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc from lighteval.utils.imports import is_vllm_available -from lighteval.utils.utils import as_list logger = logging.getLogger(__name__) @@ -66,13 +60,12 @@ logging.getLogger("ray").propagate = True logging.getLogger("ray").handlers.clear() else: - LLM = None - AsyncLLM = None - SamplingParams = None - AsyncEngineArgs = None - get_tokenizer = None - ray = None - distribute = None + from unittest.mock import Mock + + LLM = SamplingParams = get_tokenizer = ray = distribute = destroy_distributed_environment = ( + destroy_model_parallel + ) = Mock() + AsyncLLM = AsyncEngineArgs = RequestOutput = Mock() os.environ["TOKENIZERS_PARALLELISM"] = "false" @@ -80,6 +73,75 @@ class VLLMModelConfig(ModelConfig): + """ + Configuration class for VLLM inference engine. + + This configuration is used to load and configure models using the VLLM inference engine, + which provides high-performance inference for large language models with features like + PagedAttention, continuous batching, and efficient memory management. 
+ + vllm doc: https://docs.vllm.ai/en/v0.7.1/serving/engine_args.html + + Attributes: + model_name (str): + HuggingFace Hub model ID or path to the model to load. + revision (str): + Git revision of the model. Defaults to "main". + dtype (str): + Data type for model weights. Defaults to "bfloat16". Options: "float16", "bfloat16", "float32". + tensor_parallel_size (PositiveInt): + Number of GPUs to use for tensor parallelism. Defaults to 1. + data_parallel_size (PositiveInt): + Number of GPUs to use for data parallelism. Defaults to 1. + pipeline_parallel_size (PositiveInt): + Number of GPUs to use for pipeline parallelism. Defaults to 1. + gpu_memory_utilization (NonNegativeFloat): + Fraction of GPU memory to use. Lower this if running out of memory. Defaults to 0.9. + max_model_length (PositiveInt | None): + Maximum sequence length for the model. If None, automatically inferred. + Reduce this if encountering OOM issues (4096 is usually sufficient). + quantization (str | None): + Quantization method. + load_format (str | None): + The format of the model weights to load. choices: auto, pt, safetensors, npcache, dummy, tensorizer, sharded_state, gguf, bitsandbytes, mistral, runai_streamer. + swap_space (PositiveInt): + CPU swap space size in GiB per GPU. Defaults to 4. + seed (NonNegativeInt): + Random seed for reproducibility. Defaults to 1234. + trust_remote_code (bool): + Whether to trust remote code when loading models. Defaults to False. + add_special_tokens (bool): + Whether to add special tokens during tokenization. Defaults to True. + multichoice_continuations_start_space (bool): + Whether to add a space before multiple choice continuations. Defaults to True. + pairwise_tokenization (bool): + Whether to tokenize context and continuation separately for loglikelihood evals. Defaults to False. + max_num_seqs (PositiveInt): + Maximum number of sequences per iteration. Controls batch size at prefill stage. Defaults to 128. + max_num_batched_tokens (PositiveInt): + Maximum number of tokens per batch. Defaults to 2048. + subfolder (str | None): + Subfolder within the model repository. Defaults to None. + use_chat_template (bool): + Whether to use chat templates for conversation-style prompts. Defaults to False. + is_async (bool): + Whether to use the async version of VLLM. Defaults to False. + + Example: + ```python + config = VLLMModelConfig( + model_name="meta-llama/Llama-3.1-8B-Instruct", + tensor_parallel_size=2, + gpu_memory_utilization=0.8, + max_model_length=4096, + generation_parameters=GenerationParameters( + temperature=0.7, + max_new_tokens=100 + ) + ) + ``` + """ + model_name: str revision: str = "main" # revision of the model dtype: str = "bfloat16" @@ -95,7 +157,6 @@ class VLLMModelConfig(ModelConfig): swap_space: PositiveInt = 4 # CPU swap space size (GiB) per GPU. seed: NonNegativeInt = 1234 trust_remote_code: bool = False - use_chat_template: bool = False add_special_tokens: bool = True multichoice_continuations_start_space: bool = ( True # whether to add a space at the start of each continuation in multichoice generation @@ -104,6 +165,7 @@ class VLLMModelConfig(ModelConfig): max_num_seqs: PositiveInt = 128 # maximum number of sequences per iteration; This variable and `max_num_batched_tokens` effectively control the batch size at prefill stage. See https://github.com/vllm-project/vllm/issues/2492 for detailed explaination. 
max_num_batched_tokens: PositiveInt = 2048 # maximum number of tokens per batch subfolder: str | None = None + use_chat_template: bool = False is_async: bool = False # Whether to use the async version or sync version of the model @@ -135,6 +197,8 @@ def __init__( self.model_info = ModelInfo(model_name=self.model_name, model_sha=self.model_sha) self.pairwise_tokenization = config.pairwise_tokenization + self.prompt_manager = PromptManager(self.use_chat_template, self.tokenizer, config.system_prompt) + @property def tokenizer(self): return self._tokenizer @@ -221,9 +285,8 @@ def _create_auto_tokenizer(self, config: VLLMModelConfig): def greedy_until( self, - requests: list[GreedyUntilRequest], - override_bs: Optional[int] = None, - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -234,11 +297,7 @@ def greedy_until( Returns: list[GenerateReturn]: list of generated responses. """ - for request in requests: - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - request.tokenized_context = self.tok_encode(request.context) - - dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=self.DATASET_SPLITS) + dataset = GenerativeTaskDataset(requests=docs, num_dataset_splits=self.DATASET_SPLITS) results = [] for split in tqdm( @@ -255,13 +314,12 @@ def greedy_until( # NOTE: we are assuming all items in a batch behave similarly (same # stop_tokens and max_tokens genrated) which is not necessarily # the case! Because of that we only use batch size of 1 - stop_tokens = split[0].stop_sequence + stop_tokens = split[0].stop_sequences or [] max_new_tokens = self._config.generation_parameters.max_new_tokens or split[0].generation_size - returns_logits = split[0].use_logits num_samples = split[0].num_samples - context = [sample.context for sample in split] + context = [self.prompt_manager.prepare_prompt(doc) for doc in split] tokenized = self.tokenizer(context, add_special_tokens=self.add_special_tokens) # The main question for this step is the following: @@ -297,21 +355,19 @@ def greedy_until( inputs=inputs, max_new_tokens=max_new_tokens, stop_tokens=stop_tokens, - returns_logits=returns_logits, + returns_logits=False, num_samples=num_samples, ) - for vllm_output in vllm_outputs: + for i, vllm_output in enumerate(vllm_outputs): output_token_ids = [outputs.token_ids for outputs in vllm_output.outputs] - logprobs = [output.logprobs for output in vllm_output.outputs] or [] - logprobs = [logprob[token_id].logprob for token_id, logprob in zip(output_token_ids[0], logprobs[0])] result = [output.text for output in vllm_output.outputs] input_token_ids = vllm_output.prompt_token_ids - cur_response = GenerativeResponse( - result=result, - logits=logprobs, - generated_tokens=list(output_token_ids), + cur_response = ModelResponse( + input=context[i], + text=result, + output_tokens=list(output_token_ids), input_tokens=input_token_ids, ) results.append(cur_response) @@ -326,7 +382,7 @@ def _generate( returns_logits: Optional[bool] = False, num_samples: int = 1, generate: bool = True, - ) -> list[GenerativeResponse]: + ) -> list: """Contains the actual logic of the generation.""" sampling_params = SamplingParams(**self._config.generation_parameters.to_vllm_dict()) @@ -335,9 +391,10 @@ def _generate( sampling_params.max_tokens = max_new_tokens sampling_params.stop = stop_tokens sampling_params.logprobs = 1 if returns_logits else 0 - if num_samples > 1 
and sampling_params.temperature == 0: - logger.warning("num_samples > 1 but temperature is set to 0, this will not sample different outputs.") + raise ValueError( + "num_samples > 1 is not supported with temperature=0, please set temperature > 0 or use non sampling metrics." + ) else: sampling_params.temperature = 0 sampling_params.prompt_logprobs = 1 @@ -380,58 +437,75 @@ def run_inference_one_model(model_args: dict, sampling_params: SamplingParams, r return outputs - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: - for request in requests: - if request.context == "": - request.tokenized_context = [self.tokenizer.eos_token_id] - request.tokenized_continuation = self.tok_encode(request.choice) - else: - # The following line is mandatory for compatibility with the harness - request.tokenized_context, request.tokenized_continuation = self.tok_encode_pair( - request.context, request.choice, pairwise=self.pairwise_tokenization - ) - return self._loglikelihood_tokens(requests, override_bs=override_bs) + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: + return self._loglikelihood_tokens(docs) def _loglikelihood_tokens( self, - requests: list[LoglikelihoodRequest], - override_bs: int = -1, - return_bool_score: bool = True, - rolling: bool = False, - ) -> list[LoglikelihoodResponse]: - dataset = LoglikelihoodDataset(requests=requests, num_dataset_splits=1) + docs: list[Doc], + ) -> list[ModelResponse]: + dataset = LoglikelihoodDataset(requests=docs, num_dataset_splits=1) res = [] for split in tqdm(dataset.splits_iterator()): - # the last token is an eos token, so we don't need to add it - inputs = [sample.tokenized_context + sample.tokenized_continuation for sample in split] + contexts = [self.prompt_manager.prepare_prompt(doc) for doc in split] + + inputs = [] + tokenized_continuations_batch = [] + tokenized_contexts_batch = [] + + for context, doc in zip(contexts, split): + tokenized_contexts, tokenized_continuations = self.tok_encode_pair( + context, doc.choices, pairwise=self.pairwise_tokenization + ) + for tokenized_context, tokenized_continuation in zip(tokenized_contexts, tokenized_continuations): + inputs.append(tokenized_context + tokenized_continuation) + tokenized_continuations_batch.append(tokenized_continuation) + tokenized_contexts_batch.append(tokenized_context) + # Left truncate the inputs to the maximum length inputs = [input[-self.max_length :] for input in inputs] outputs = self._generate(inputs, generate=False) - for i, output in enumerate(outputs): - input = split[i] - continuation_logprobs = [] - for token, logprobs in zip(input.tokenized_continuation[::-1], output.prompt_logprobs[::-1]): - continuation_logprobs.append(logprobs[token]) - bool_score = all(logprob.rank == 1 for logprob in continuation_logprobs) - continuation_logprobs = [logprob.logprob for logprob in continuation_logprobs] - answer = LoglikelihoodResponse( - input_tokens=input.tokenized_context + input.tokenized_continuation, - generated_tokens=input.tokenized_continuation, - result=(sum(continuation_logprobs), bool_score if return_bool_score else None), + flat_index = 0 + for i, doc in enumerate(split): + outputs_doc = outputs[flat_index : flat_index + len(doc.choices)] + tokenized_continuations_doc = tokenized_continuations_batch[flat_index : flat_index + len(doc.choices)] + tokenized_contexts_doc = tokenized_contexts_batch[flat_index : flat_index + len(doc.choices)] + logprobs_doc = [] + argmax_doc = [] 
+ output_tokens_doc = [] + input_tokens_doc = [] + + for output, context, continuation in zip( + outputs_doc, tokenized_contexts_doc, tokenized_continuations_doc + ): + continuation_logprobs = [] + for token, logprobs in zip(continuation[::-1], output.prompt_logprobs[::-1]): + continuation_logprobs.append(logprobs[token]) + + bool_score = all(logprob.rank == 1 for logprob in continuation_logprobs) + continuation_logprobs = [logprob.logprob for logprob in continuation_logprobs] + continuation_logprobs = sum(continuation_logprobs) + logprobs_doc.append(continuation_logprobs) + argmax_doc.append(bool_score) + output_tokens_doc.append(continuation) + input_tokens_doc.append(context) + + answer = ModelResponse( + input=contexts[i], + input_tokens=input_tokens_doc, + output_tokens=output_tokens_doc, + logprobs=logprobs_doc, + argmax_logits_eq_gold=argmax_doc, ) res.append(answer) + flat_index += len(doc.choices) return dataset.get_original_order(res) - def loglikelihood_rolling(): - pass - - def loglikelihood_single_token(): - pass + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: + raise NotImplementedError() class AsyncVLLMModel(VLLMModel): @@ -445,7 +519,7 @@ def cleanup(self): destroy_distributed_environment() torch.cuda.empty_cache() - def _create_auto_model(self, config: VLLMModelConfig) -> Optional[AsyncLLM]: + def _create_auto_model(self, config: VLLMModelConfig): """ Creates an instance of the async vllm model loaded from HF. Requires using the v1 of VLLM. @@ -483,32 +557,33 @@ def _create_auto_model(self, config: VLLMModelConfig) -> Optional[AsyncLLM]: async def _async_one_item( self, index: int, - request: GreedyUntilRequest | LoglikelihoodRequest, + doc: Doc, + generative: bool, ) -> Coroutine[None, list, str]: """Contains the actual logic of the generation.""" sampling_params = SamplingParams(**self._config.generation_parameters.to_vllm_dict()) - if isinstance(request, LoglikelihoodRequest): + if not generative: sampling_params.temperature = 0 sampling_params.prompt_logprobs = 1 sampling_params.max_tokens = 1 sampling_params.detokenize = False - prompt = request.context + request.choice - index = f"logprob_{index}" - elif isinstance(request, GreedyUntilRequest): - sampling_params.n = request.num_samples + prompt = self.prompt_manager.prepare_prompt(doc) + doc.choice + index_str = f"logprob_{index}" + else: + sampling_params.n = doc.num_samples if sampling_params.n > 1: # Todo clementine: investigate more logger.warning( "Careful, there can be unexpected behavior when using sampling evals with the async vllm model" ) - sampling_params.max_tokens = self._config.generation_parameters.max_new_tokens or request.generation_size - sampling_params.stop = [] if self.use_chat_template else request.stop_sequence - sampling_params.logprobs = int(request.use_logits) - prompt = request.context - index = f"generative_{index}" + sampling_params.max_tokens = self._config.generation_parameters.max_new_tokens or doc.generation_size + sampling_params.stop = [] if self.use_chat_template else doc.stop_sequences + sampling_params.logprobs = int(doc.use_logits) + prompt = self.prompt_manager.prepare_prompt(doc) + index_str = f"generative_{index}" - generator = self.model.generate(request_id=str(index), prompt=prompt, sampling_params=sampling_params) + generator = self.model.generate(request_id=index_str, prompt=prompt, sampling_params=sampling_params) try: while output := await anext(generator): continue @@ -517,18 +592,17 @@ async def _async_one_item( return output - async def 
_async_batch(self, requests: list[GreedyUntilRequest | LoglikelihoodRequest]) -> list: + async def _async_batch(self, docs: list[Doc], generative: bool) -> list: processed_requests = [ - self._async_one_item(index=index, request=request) for index, request in enumerate(requests) + self._async_one_item(index=index, doc=doc, generative=generative) for index, doc in enumerate(docs) ] results = await asyncio.gather(*processed_requests) return results async def greedy_until( self, - requests: list[GreedyUntilRequest], - **kwargs, - ) -> list[GenerativeResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. @@ -538,13 +612,9 @@ async def greedy_until( Returns: list[GenerateReturn]: list of generated responses. """ - for request in requests: - request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] - request.tokenized_context = self.tok_encode(request.context) - results = [] - responses: list[RequestOutput] = await self._async_batch(requests=requests) + responses = await self._async_batch(docs=docs, generative=True) for response in responses: output_token_ids = [outputs.token_ids for outputs in response.outputs] @@ -553,10 +623,10 @@ async def greedy_until( result = [output.text for output in response.outputs] input_token_ids = response.prompt_token_ids - cur_response = GenerativeResponse( - result=result, - logits=logprobs, - generated_tokens=list(output_token_ids), + cur_response = ModelResponse( + text=result, + logprobs=logprobs, + output_tokens=list(output_token_ids), input_tokens=input_token_ids, ) results.append(cur_response) @@ -565,10 +635,8 @@ async def greedy_until( async def loglikelihood( self, - requests: list[LoglikelihoodRequest], - return_bool_score: bool = True, - **kwargs, - ) -> list[LoglikelihoodResponse]: + docs: list[Doc], + ) -> list[ModelResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met and stores the logprobs. @@ -579,38 +647,22 @@ async def loglikelihood( Returns: list[LoglikelihoodResponse]: list of generated responses. 
""" - - for request in requests: - if request.context == "": - request.tokenized_context = [self.tokenizer.eos_token_id] - request.tokenized_continuation = self.tok_encode(request.choice) - else: - # The following line is mandatory for compatibility with the harness - request.tokenized_context, request.tokenized_continuation = self.tok_encode_pair( - request.context, request.choice, pairwise=self.pairwise_tokenization - ) - results = [] - responses: list[RequestOutput] = await self._async_batch(requests=requests) + responses = await self._async_batch(docs=docs, generative=False) - for response, input in zip(responses, requests): + for response, input in zip(responses, docs): continuation_logprobs = [] for token, logprobs in zip(input.tokenized_continuation[::-1], response.prompt_logprobs[::-1]): continuation_logprobs.append(logprobs[token]) bool_score = all(logprob.rank == 1 for logprob in continuation_logprobs) continuation_logprobs = [logprob.logprob for logprob in continuation_logprobs] - answer = LoglikelihoodResponse( + answer = ModelResponse( input_tokens=input.tokenized_context + input.tokenized_continuation, - generated_tokens=input.tokenized_continuation, - result=(sum(continuation_logprobs), bool_score if return_bool_score else None), + output_tokens=input.tokenized_continuation, + logprobs=sum(continuation_logprobs), + argmax_logits_eq_gold=bool_score, ) results.append(answer) return results - - def loglikelihood_rolling(self): - pass - - def loglikelihood_single_token(): - pass diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index c81a52f17..49e0e3a5f 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -20,12 +20,10 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-import ast import asyncio import collections import os import random -import re from contextlib import nullcontext from dataclasses import dataclass from datetime import timedelta @@ -35,18 +33,14 @@ from tqdm import tqdm from lighteval.logging.evaluation_tracker import EvaluationTracker -from lighteval.metrics.utils.metric_utils import MetricCategory +from lighteval.metrics import apply_metric from lighteval.models.model_loader import TransformersModel, load_model from lighteval.models.model_output import ( - GenerativeMultiturnResponse, - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, ModelResponse, ) -from lighteval.tasks.lighteval_task import LightevalTask, create_requests_from_tasks -from lighteval.tasks.registry import Registry, taskinfo_selector -from lighteval.tasks.requests import RequestType, SampleUid +from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig +from lighteval.tasks.registry import Registry +from lighteval.tasks.requests import SamplingMethod from lighteval.utils.imports import ( NO_ACCELERATE_ERROR_MSG, NO_NANOTRON_ERROR_MSG, @@ -67,6 +61,10 @@ if is_accelerate_available(): from accelerate import Accelerator, InitProcessGroupKwargs +else: + from unittest.mock import Mock + + Accelerator = InitProcessGroupKwargs = Mock() if is_nanotron_available(): from nanotron import distributed as dist from nanotron.parallel.context import ParallelContext @@ -103,8 +101,6 @@ class PipelineParameters: custom_tasks_directory: str | None = None num_fewshot_seeds: int = 1 max_samples: int | None = None - use_chat_template: bool = False - system_prompt: str | None = None cot_prompt: str | None = None load_responses_from_details_date_id: str | None = None bootstrap_iters: int = 1000 @@ -145,6 +141,7 @@ def __init__( self.pipeline_parameters = pipeline_parameters self.launcher_type = self.pipeline_parameters.launcher_type + if self.pipeline_parameters.max_samples: logger.warning( "--max_samples WAS SET. THESE NUMBERS ARE ONLY PARTIAL AND SHOULD NOT BE USED FOR COMPARISON UNLESS YOU KNOW WHAT YOU ARE DOING." 
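Editor's note: `use_chat_template` and `system_prompt` are removed from `PipelineParameters` just above; per the model-config changes earlier in this diff, they now live on the model configuration (`system_prompt` on the `ModelConfig` base class, `use_chat_template` on `VLLMModelConfig`). A hedged usage sketch, assuming lighteval with this patch installed and only fields that appear in the diff:

```python
from lighteval.models.vllm.vllm_model import VLLMModelConfig

config = VLLMModelConfig(
    model_name="meta-llama/Llama-3.1-8B-Instruct",
    use_chat_template=True,                        # previously a PipelineParameters field
    system_prompt="You are a helpful assistant.",  # now inherited from ModelConfig
)
print(config.system_prompt)
```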
@@ -159,6 +156,7 @@ def __init__( generation_parameters = model_config.generation_parameters.model_dump() if model_config else {} self.evaluation_tracker.general_config_logger.log_model_info(generation_parameters, self.model.model_info) + self._init_random_seeds() self._init_tasks_and_requests(tasks=tasks) # Final results @@ -204,51 +202,47 @@ def _init_model(self, model_config, model): else: return TransformersModel.from_model( model=model, - use_chat_template=self.pipeline_parameters.use_chat_template, accelerator=self.accelerator, ) def _init_tasks_and_requests(self, tasks: str): with local_ranks_zero_first() if self.launcher_type == ParallelismManager.NANOTRON else nullcontext(): logger.info("--- LOADING TASKS ---") + + # The registry contains all the potential tasks registry = Registry( custom_tasks=self.pipeline_parameters.custom_tasks_directory, ) - task_names_list, fewshots_dict = taskinfo_selector(tasks, registry) - task_dict = registry.get_task_dict(task_names_list) + + # load the tasks fro the configs and their datasets + task_configs: list[LightevalTaskConfig] = registry.get_tasks_configs(tasks) + self.tasks_dict: dict[str, LightevalTask] = registry.get_tasks_from_configs(task_configs) + LightevalTask.load_datasets(self.tasks_dict, self.pipeline_parameters.dataset_loading_processes) + self.documents_dict = { + task.full_name: task.get_docs(self.pipeline_parameters.max_samples) + for _, task in self.tasks_dict.items() + } + + self.sampling_docs = collections.defaultdict(list) + for _, docs in self.documents_dict.items(): + for doc in docs: + for sampling in doc.sampling_methods: + self.sampling_docs[sampling].append(doc) + # If there are metric_options defined from the yaml file, # review if they have to be updated. if self._metric_options: - self._update_num_samples(task_dict) - LightevalTask.load_datasets(list(task_dict.values()), self.pipeline_parameters.dataset_loading_processes) - - self.evaluation_tracker.task_config_logger.log(task_dict) - - requests, docs = create_requests_from_tasks( - task_dict=task_dict, - fewshot_dict=fewshots_dict, - num_fewshot_seeds=self.pipeline_parameters.num_fewshot_seeds, - lm=self.model, - max_samples=self.pipeline_parameters.max_samples, - evaluation_tracker=self.evaluation_tracker, - use_chat_template=self.pipeline_parameters.use_chat_template, - system_prompt=self.pipeline_parameters.system_prompt, - cot_prompt=self.pipeline_parameters.cot_prompt, - ) + self._update_num_samples(list(self.tasks_dict.values())) - self.task_names_list = task_names_list - self.task_dict = task_dict - self.fewshot_dict = fewshots_dict - self.requests = requests - self.docs = docs + self.evaluation_tracker.task_config_logger.log(self.tasks_dict) - def _update_num_samples(self, task_dict: dict[str, LightevalTask]): + def _update_num_samples(self, tasks: list[LightevalTask]): """Helper function to update the num_samples of a given metric via the yaml file. As it has to be done at the metric level, it's better to update the value per metric. It will add a num_samples to the already defined metrics' num_samples if defined in the yaml file. As later when constructing the requests the max is taken over the num_samples, this is valid. 
""" - for _, task in task_dict.items(): + for task in tasks: for metric in task.metrics: if metric_data := self._metric_options.get(metric.metric_name, None): num_samples = metric_data.get("num_samples", None) @@ -275,209 +269,62 @@ def evaluate(self): self.evaluation_tracker.general_config_logger.log_args_info( num_fewshot_seeds=self.pipeline_parameters.num_fewshot_seeds, max_samples=self.pipeline_parameters.max_samples, - job_id=self.pipeline_parameters.job_id, + job_id=str(self.pipeline_parameters.job_id), config=self.model_config, ) if self.pipeline_parameters.load_responses_from_details_date_id: try: - sample_id_to_responses = self._load_responses_from_details() + outputs = self._load_responses_from_details() except FileNotFoundError as e: logger.warning( f"No responses found for {self.pipeline_parameters.load_responses_from_details_date_id} in details directory: {e}. Running model instead." ) - sample_id_to_responses = self._run_model() + outputs = self._run_model() else: - sample_id_to_responses = self._run_model() + outputs = self._run_model() - self._compute_metrics(sample_id_to_responses) + self._compute_metrics(outputs) if self.is_main_process(): self.evaluation_tracker.general_config_logger.log_end_time() self.evaluation_tracker.metrics_logger.aggregate( - task_dict=self.task_dict, bootstrap_iters=self.pipeline_parameters.bootstrap_iters + task_dict=self.tasks_dict, bootstrap_iters=self.pipeline_parameters.bootstrap_iters ) self.evaluation_tracker.details_logger.aggregate() - def _unpack(self, x): - if isinstance(x, str): - return x - elif isinstance(x, (list, tuple)): - return self._unpack(x[0]) - else: - raise ValueError(f"Unknown type {type(x)} of prediction {x}") - - def _parse_tensor_string(self, tensor_string): - """ - Convert a string containing PyTorch-like `tensor([...], device='cuda:0', ...)` - into a Python list (or nested lists) of numbers. - - Example: - "[tensor([1, 2, 3], device='cuda:0'), tensor([[4,5],[6,7]], dtype=torch.int64)]" - -> [[1, 2, 3], [[4, 5], [6, 7]]] - """ - - # Regex explanation: - # - tensor\(\s*: Matches "tensor(" (possibly with spaces after), literally. - # - (.*?): Captures everything lazily into group(1), until the first subsequent part matches. - # We rely on the next pattern to anchor the end of this capture. - # - \): The literal closing parenthesis, but we anchor the match by ignoring - # further arguments (device=..., dtype=..., etc.) inside. - # - # The tricky part: a tensor might look like - # tensor([ ... ], device='cuda:0', dtype=torch.int64) - # so the bracket portion is `[ ... ]`, but it can have newlines, etc. - # - # We'll handle that by first capturing the entire content up to the final parenthesis, - # then parse out the bracket portion. This can be done in a function-based re.sub. - - pattern = re.compile( - r"tensor\s*\(\s*(.*?)\s*\)", # capture everything inside tensor(...) - flags=re.DOTALL, - ) - - def tensor_replacer(match): - inside = match.group(1).strip() - # `inside` might look like: [1, 2, 3], device='cuda:0' - # or: - # [ - # 1, 2, 3, - # 4, 5, ... - # ], device='cuda:0', dtype=torch.int64 - # - # 1) Extract the bracketed array portion: the first [ ... ] block - # which might be multi-line. We'll use another regex for that. - - # We look for the bracketed portion from the first '[' to its matching ']'. - # Because the inside can be multi-line, we use DOTALL. But we still need - # to ensure we don't accidentally go beyond the matching bracket. 
- # - # A robust approach to properly match brackets can be done with a small parser, - # but for typical well-formed strings, a lazy match of the form - # r"\[.*?\]" DOTALL often suffices, assuming no nested brackets inside. - - bracket_pattern = re.compile(r"\[.*?\]", re.DOTALL) - bracket_match = bracket_pattern.search(inside) - if not bracket_match: - # If we fail to find a bracket, just return something safe. - # This means the string didn't match the expected format. - return "[]" - - # The bracketed portion (e.g. "[1, 2, 3\n, 4]"). - bracketed_content = bracket_match.group(0) - - # Return just the bracketed content, - # effectively replacing "tensor(...)" with "[...]". - return bracketed_content - - # Step 1: Replace every `tensor(...)` occurrence with just the bracketed list. - processed = pattern.sub(tensor_replacer, tensor_string) - - # Step 2: Now we can safely parse the result with literal_eval. - # If there's still something weird, it may throw ValueError. - try: - return ast.literal_eval(processed) - except Exception as e: - raise ValueError(f"Failed to parse after preprocessing. Processed string:\n{processed}\n\nError: {e}") - - def _load_responses_from_details(self): - logger.info("--- LOADING RESPONSES FROM DETAILS ---") - sample_id_to_responses: dict[(SampleUid, MetricCategory), list[ModelResponse]] = collections.defaultdict(list) - - request_types = list(self.requests.keys()) - if len(request_types) > 1: - raise ValueError( - "Loading responses from details when there are multiple request types is currently not supported" - ) - model_response_type = self._get_model_response_type(request_types[0]) - - details_datasets = self.evaluation_tracker.load_details_datasets( - self.pipeline_parameters.load_responses_from_details_date_id, self.task_names_list - ) - - for task_name, dataset in tqdm(details_datasets.items(), desc="Loading responses from details for tasks"): - task: LightevalTask = self._get_task(task_name) - num_samples = len(set(dataset["specifics"])) - max_samples = self.pipeline_parameters.max_samples if self.pipeline_parameters.max_samples else num_samples - if num_samples > max_samples: - logger.warning( - f"Skipping {num_samples - max_samples} samples for {task_name} when loading responses from details because max_samples is set to {max_samples}" - ) - num_samples = self.pipeline_parameters.max_samples - - predictions = [self._unpack(ast.literal_eval(p)) for p in dataset["predictions"][:num_samples]] - input_tokens = [self._parse_tensor_string(t) for t in dataset["input_tokens"][:num_samples]] - cont_tokens = [self._parse_tensor_string(t) for t in dataset["cont_tokens"][:num_samples]] - truncated = [ast.literal_eval(t)[0] for t in dataset["truncated"][:num_samples]] - padded = [ast.literal_eval(p)[0] for p in dataset["padded"][:num_samples]] - - if model_response_type == GenerativeResponse: - logits = [ast.literal_eval(p) for p in dataset["pred_logits"][:num_samples]] - - for metric_category, has_metric_category in task.has_metric_category.items(): - if not has_metric_category: - continue - - for idx in range(num_samples): - kwargs = { - "result": predictions[idx], - "input_tokens": input_tokens[idx], - "generated_tokens": cont_tokens[idx], - "truncated_tokens_count": truncated[idx], - "padded_tokens_count": padded[idx], - } - if model_response_type == GenerativeResponse: - kwargs["logits"] = logits[idx] - - response = model_response_type(**kwargs) - sample_id_to_responses[(SampleUid(task_name, f"{idx}_{0}"), metric_category)] = [response] - return 
sample_id_to_responses - - def _get_model_response_type(self, request_type): - if request_type == RequestType.LOGLIKELIHOOD: - model_response_type = LoglikelihoodResponse - elif request_type == RequestType.LOGLIKELIHOOD_SINGLE_TOKEN: - model_response_type = LoglikelihoodSingleTokenResponse - elif request_type == RequestType.LOGLIKELIHOOD_ROLLING: - model_response_type = LoglikelihoodResponse - elif request_type == RequestType.GREEDY_UNTIL_MULTI_TURN: - model_response_type = GenerativeMultiturnResponse - elif request_type == RequestType.GREEDY_UNTIL: - model_response_type = GenerativeResponse - else: - raise ValueError( - f"Loading responses from details for request type {request_type} is currently not supported" - ) - - return model_response_type - async def _run_model_async(self): - sample_id_to_responses: dict[(SampleUid, MetricCategory), list[ModelResponse]] = collections.defaultdict(list) - for request_type, requests in self.requests.items(): - logger.info(f"Sending {request_type} requests") - run_model = self.model.get_method_from_request_type(request_type=request_type) - responses = await run_model(requests) - - # Storing the responses associated to the same samples together - for response, request in zip(responses, requests): - for metric_category in request.metric_categories: - sample_id = SampleUid(request.task_name, request.sample_index) - sample_id_to_responses[(sample_id, metric_category)].append(response) - return sample_id_to_responses + outputs = {} + for sampling_method, docs in self.sampling_docs.items(): + logger.info(f"Running {sampling_method} requests") + match sampling_method: + case SamplingMethod.GENERATIVE: + model_outputs = await self.model.greedy_until(docs) + outputs[sampling_method] = model_outputs + case SamplingMethod.LOGPROBS: + model_outputs = await self.model.loglikelihood(docs) + outputs[sampling_method] = model_outputs + + return outputs def _run_model_sync(self): - sample_id_to_responses: dict[(SampleUid, MetricCategory), list[ModelResponse]] = collections.defaultdict(list) - for request_type, requests in self.requests.items(): - logger.info(f"Running {request_type} requests") - run_model = self.model.get_method_from_request_type(request_type=request_type) - responses = run_model(requests) - - # Storing the responses associated to the same samples together - for response, request in zip(responses, requests): - for metric_category in request.metric_categories: - sample_id = SampleUid(request.task_name, request.sample_index) - sample_id_to_responses[(sample_id, metric_category)].append(response) - return sample_id_to_responses + # Running all requests depending on the model call type (log likelihood, generative, ...) + # to be able to batch them + outputs = {} + for sampling_method, docs in self.sampling_docs.items(): + logger.info(f"Running {sampling_method} requests") + match sampling_method: + case SamplingMethod.GENERATIVE: + model_outputs = self.model.greedy_until(docs) + outputs[sampling_method] = model_outputs + case SamplingMethod.LOGPROBS: + model_outputs = self.model.loglikelihood(docs) + outputs[sampling_method] = model_outputs + case SamplingMethod.PERPLEXITY: + model_outputs = self.model.loglikelihood_rolling(docs) + outputs[sampling_method] = model_outputs + + return outputs def _run_model(self): # Running all requests depending on the model call type (log likelihood, generative, ...) 
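Editor's note: the rewritten pipeline groups every `Doc` under each `SamplingMethod` its metrics require and then dispatches each group to one model call, as in `_run_model_sync` above. A self-contained sketch of that grouping-and-dispatch pattern, using stand-in types rather than the real lighteval classes (Python 3.10+ for `match`):

```python
import collections
from dataclasses import dataclass, field
from enum import Enum, auto


class SamplingMethod(Enum):  # stand-in mirroring lighteval.tasks.requests.SamplingMethod
    GENERATIVE = auto()
    LOGPROBS = auto()


@dataclass
class FakeDoc:  # stand-in for lighteval.tasks.requests.Doc
    doc_id: int
    sampling_methods: list = field(default_factory=list)


docs = [
    FakeDoc(0, [SamplingMethod.GENERATIVE]),
    FakeDoc(1, [SamplingMethod.LOGPROBS, SamplingMethod.GENERATIVE]),
]

# Group every doc under each sampling method its metrics need (a doc can appear in several groups).
sampling_docs = collections.defaultdict(list)
for doc in docs:
    for method in doc.sampling_methods:
        sampling_docs[method].append(doc)

# Dispatch each group to the matching (fake) model call, mirroring _run_model_sync.
outputs = {}
for method, group in sampling_docs.items():
    match method:
        case SamplingMethod.GENERATIVE:
            outputs[method] = [f"generation for doc {d.doc_id}" for d in group]
        case SamplingMethod.LOGPROBS:
            outputs[method] = [f"logprobs for doc {d.doc_id}" for d in group]

print(outputs)
```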
@@ -485,63 +332,78 @@ def _run_model(self):
         logger.info("--- RUNNING MODEL ---")
 
         if self.model.is_async:
-            sample_id_to_responses = asyncio.run(self._run_model_async())
-
+            outputs = asyncio.run(self._run_model_async())
         else:
-            sample_id_to_responses = self._run_model_sync()
+            outputs = self._run_model_sync()
 
         # Cleaning up the model before running metrics
         self.model.cleanup()
 
-        return sample_id_to_responses
+        return outputs
 
-    def _get_task(self, task_name: str):
-        short_task_name = task_name.rsplit("|", 1)[0]
-        return self.task_dict[short_task_name]
-
-    def _compute_metrics(self, sample_id_to_responses):
+    def _compute_metrics(self, sampling_method_responses: dict[str, list[ModelResponse]]):
         # To compute the metrics we first group the samples by task and then by metrics.
         # This way we can batch the metrics computation for each task and metric category
 
         # This variable will hold the samples grouped by task and metric category
         # example:
         # task_metric_category_groups = {
-        #     "task_name": {
-        #         "metric_category": {
-        #             "ids": [sample_id1, sample_id2, ...],
-        #             "responses": [[response1_1, response1_2, ...], [response2_1, response2_2, ...], ...],
-        #             "docs": [doc1, doc2, ...]
+        #     "gsm8k_1": {
+        #         "GENERATIVE": [
+        #             (doc1, response1), (doc2, response2), ...,
+        #         ],
+        #         "LOGLIKELIHOOD": [
+        #             (doc1, response1), (doc2, response2), ...,
+        #         ],
+        #     }
+        # }
         logger.info("--- COMPUTING METRICS ---")
 
-        task_metric_category_groups = collections.defaultdict(
-            lambda: collections.defaultdict(lambda: collections.defaultdict(list))
-        )
+        task_metric_category_groups = collections.defaultdict(lambda: collections.defaultdict(list))
 
-        for (sample_id, metric_category), sample_responses in sample_id_to_responses.items():
-            task_metric_category_groups[sample_id.task_name][metric_category]["ids"].append(sample_id.doc_id_seed)
-            task_metric_category_groups[sample_id.task_name][metric_category]["responses"].append(sample_responses)
-            task_metric_category_groups[sample_id.task_name][metric_category]["docs"].append(self.docs[sample_id])
+        for sampling_method, model_responses in sampling_method_responses.items():
+            for doc, model_response in zip(self.sampling_docs[sampling_method], model_responses):
+                task_metric_category_groups[doc.task_name][sampling_method].append((doc, model_response))
 
-        for task_name, samples_per_metric in task_metric_category_groups.items():
-            task: LightevalTask = self._get_task(task_name)
+        for task_name, samples_per_method in task_metric_category_groups.items():
+            task: LightevalTask = self.tasks_dict[task_name]
+            for sampling_method, samples in samples_per_method.items():
+                metric_category_metrics = [metric for metric in task.metrics if metric.category == sampling_method]
 
-            for metric_category, samples in samples_per_metric.items():
-                sample_ids = samples["ids"]
-                responses = samples["responses"]
-                docs = samples["docs"]
-                metric_function = task.get_metric_method_from_category(metric_category=metric_category)
-                metric_category_metrics = [metric for metric in task.metrics if metric.category == metric_category]
+                docs = [doc for doc, _ in samples]
+                responses = [response for _, response in samples]
 
-                outputs = metric_function(
-                    sample_ids=sample_ids,
+                outputs = apply_metric(
+                    docs=docs,
                     responses=responses,
-                    formatted_docs=docs,
                     metrics=metric_category_metrics,
                 )
 
                 for output, doc, response in zip(outputs, docs, responses):
                     self.evaluation_tracker.metrics_logger.log(task_name, output)
-                    self.evaluation_tracker.details_logger.log(task_name, task, doc, response, output)
+                    
self.evaluation_tracker.details_logger.log(task_name, doc, response, output) + + def _load_responses_from_details(self): + logger.info("--- LOADING RESPONSES FROM DETAILS ---") + model_responses = {} + tasks_names = list(self.tasks_dict.keys()) + sampling_methods = list(self.sampling_docs.keys()) + + if len(sampling_methods) > 1: + raise ValueError( + "Loading responses from details when there are multiple request types is currently not supported" + ) + + assert self.pipeline_parameters.load_responses_from_details_date_id is not None + + details_datasets = self.evaluation_tracker.load_details_datasets( + self.pipeline_parameters.load_responses_from_details_date_id, tasks_names + ) + + for _, dataset in tqdm(details_datasets.items(), desc="Loading responses from details for tasks"): + for sampling_method in sampling_methods: + model_responses[sampling_method] = [ + ModelResponse(**model_response["model_response"]) for model_response in dataset + ] + + return model_responses def save_and_push_results(self): logger.info("--- SAVING AND PUSHING RESULTS ---") diff --git a/src/lighteval/tasks/default_prompts.py b/src/lighteval/tasks/default_prompts.py index 786c4a0b1..1e9f518e7 100644 --- a/src/lighteval/tasks/default_prompts.py +++ b/src/lighteval/tasks/default_prompts.py @@ -61,7 +61,7 @@ def mmmu_pro(line, task_name: Optional[str] = None): # Construct prompt formatted_choices = "\n".join(choices) - prompt = f"{instructions}\n{question}\n{formatted_choices}" + prompt = f"\n{question}\n{formatted_choices}" # Collect images image_order = [] @@ -907,7 +907,6 @@ def gpqa_instruct(line, task_name: str = None): query=query, choices=LETTER_INDICES[: len(choices)], gold_index=gold_index, - instruction=query, ) @@ -2732,6 +2731,7 @@ def language(code): query=f"{language(l_in)} phrase: " + line["translation"][l_in].rstrip() + f"\n{language(l_out)} phrase:", gold_index=0, choices=[line["translation"][l_out].rstrip()], + instruction=f"Translate {language(l_in)} to {language(l_out)}, do not explain, only output the translation.", ) diff --git a/src/lighteval/tasks/default_tasks.py b/src/lighteval/tasks/default_tasks.py index b77b27d52..815f08289 100644 --- a/src/lighteval/tasks/default_tasks.py +++ b/src/lighteval/tasks/default_tasks.py @@ -37,7 +37,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, # expected an answer in a format 'Answer: B' - metric=[Metrics.gpqa_instruct_metric], + metrics=[Metrics.gpqa_instruct_metric], stop_sequence=None, trust_dataset=True, version=0, @@ -53,7 +53,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, # expected an answer in a format 'Answer: B' - metric=[Metrics.gpqa_instruct_metric], + metrics=[Metrics.gpqa_instruct_metric], stop_sequence=None, trust_dataset=True, version=0, @@ -69,7 +69,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, # expected an answer in a format 'Answer: B' - metric=[Metrics.gpqa_instruct_metric], + metrics=[Metrics.gpqa_instruct_metric], stop_sequence=None, trust_dataset=True, version=0, @@ -85,7 +85,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -101,7 +101,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, 
version=0, @@ -117,7 +117,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -133,7 +133,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -149,7 +149,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -165,7 +165,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -181,7 +181,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -197,7 +197,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -213,7 +213,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -229,7 +229,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -245,7 +245,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -261,7 +261,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -277,7 +277,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -293,7 +293,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + 
metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -309,7 +309,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -325,7 +325,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -341,7 +341,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -357,7 +357,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=None, trust_dataset=True, version=0, @@ -372,8 +372,8 @@ evaluation_splits=["train"], few_shots_split=None, few_shots_select=None, - generation_size=32768, - metric=[ + generation_size=None, + metrics=[ Metrics.math_pass_at_1_1n, Metrics.math_pass_at_1_4n, Metrics.math_pass_at_1_8n, @@ -394,7 +394,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8192, - metric=[Metrics.g_pass_at_16_expr_gold], + metrics=[Metrics.g_pass_at_16_expr_gold], version=1, ) aime25 = LightevalTaskConfig( @@ -408,13 +408,13 @@ few_shots_split=None, few_shots_select=None, generation_size=10000, - metric=[ + metrics=[ Metrics.math_pass_at_1_1n, - Metrics.math_pass_at_1_4n, - Metrics.math_pass_at_1_8n, - Metrics.math_pass_at_1_16n, - Metrics.math_pass_at_1_32n, - Metrics.math_pass_at_1_64n, + # Metrics.math_pass_at_1_4n, + # Metrics.math_pass_at_1_8n, + # Metrics.math_pass_at_1_16n, + # Metrics.math_pass_at_1_32n, + # Metrics.math_pass_at_1_64n, ], version=2, ) @@ -429,7 +429,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8192, - metric=[Metrics.g_pass_at_16_expr_gold], + metrics=[Metrics.g_pass_at_16_expr_gold], version=1, ) anachronisms_bigbench = LightevalTaskConfig( @@ -443,7 +443,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -459,7 +459,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -475,7 +475,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -491,7 +491,7 @@ few_shots_split="train_r1", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -507,7 +507,7 @@ few_shots_split="train_r2", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -523,7 +523,7 @@ few_shots_split="train_r3", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -539,7 +539,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.exact_match], + metrics=[Metrics.exact_match], stop_sequence=None, trust_dataset=False, version=0, @@ -555,7 +555,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.exact_match], + metrics=[Metrics.loglikelihood_acc, Metrics.exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -571,7 +571,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -587,7 +587,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -603,7 +603,7 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -619,7 +619,7 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -635,7 +635,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -651,7 +651,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -667,7 +667,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -683,7 +683,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -699,7 +699,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -715,7 +715,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -731,7 +731,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -747,7 +747,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -763,7 +763,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -779,7 +779,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -795,7 +795,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -811,7 +811,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -827,7 +827,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -843,7 +843,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -859,7 +859,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -875,7 +875,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=None, trust_dataset=True, version=0, @@ -891,7 +891,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -912,7 +912,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -928,7 +928,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -944,7 +944,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -960,7 +960,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -976,7 +976,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -992,7 +992,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1008,7 +1008,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1024,7 +1024,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1040,7 +1040,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1056,7 +1056,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1072,7 +1072,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1088,7 +1088,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1104,7 +1104,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1120,7 +1120,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1136,7 +1136,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1152,7 +1152,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1168,7 +1168,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1184,7 +1184,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1200,7 +1200,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1217,7 +1217,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1234,7 +1234,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, 
Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1251,7 +1251,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1268,7 +1268,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1285,7 +1285,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1302,7 +1302,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1319,7 +1319,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1336,7 +1336,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1353,7 +1353,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1370,7 +1370,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1387,7 +1387,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1404,7 +1404,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1421,7 +1421,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], 
must_remove_duplicate_docs=True, trust_dataset=True, @@ -1438,7 +1438,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1455,7 +1455,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1472,7 +1472,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1489,7 +1489,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["", "Q=", "\n\n"], must_remove_duplicate_docs=True, trust_dataset=True, @@ -1506,7 +1506,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1528,7 +1528,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1550,7 +1550,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1572,7 +1572,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1594,7 +1594,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1616,7 +1616,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1638,7 +1638,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1660,7 +1660,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1682,7 +1682,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1704,7 +1704,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1726,7 +1726,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1748,7 +1748,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ 
-1770,7 +1770,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1792,7 +1792,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1814,7 +1814,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1836,7 +1836,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1858,7 +1858,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1880,7 +1880,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1902,7 +1902,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1924,7 +1924,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1946,7 +1946,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1968,7 +1968,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -1990,7 +1990,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2012,7 +2012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2034,7 +2034,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2056,7 +2056,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2078,7 +2078,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2100,7 +2100,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2122,7 +2122,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2144,7 +2144,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2166,7 +2166,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2188,7 +2188,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2210,7 +2210,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2232,7 +2232,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2254,7 +2254,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2276,7 +2276,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2298,7 +2298,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2320,7 +2320,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2342,7 +2342,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2364,7 +2364,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2380,7 +2380,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2396,7 +2396,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2417,7 +2417,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2438,7 +2438,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2459,7 +2459,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2480,7 +2480,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2501,7 +2501,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2522,7 +2522,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2543,7 +2543,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2564,7 +2564,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ 
Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2585,7 +2585,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2606,7 +2606,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2627,7 +2627,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2648,7 +2648,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2669,7 +2669,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2690,7 +2690,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2711,7 +2711,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2732,7 +2732,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2753,7 +2753,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2774,7 +2774,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2795,7 +2795,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2816,7 +2816,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2837,7 +2837,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2858,7 +2858,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2879,7 +2879,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -2900,7 +2900,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2916,7 +2916,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2932,7 +2932,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + 
metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2948,7 +2948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2964,7 +2964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2980,7 +2980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -2996,7 +2996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3012,7 +3012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3028,7 +3028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3044,7 +3044,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3060,7 +3060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3076,7 +3076,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3092,7 +3092,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3108,7 +3108,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3124,7 +3124,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3140,7 +3140,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], + metrics=[Metrics.rouge1, Metrics.rouge2, Metrics.rougeL], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3156,7 +3156,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3177,7 +3177,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3198,7 +3198,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3219,7 +3219,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3240,7 +3240,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3261,7 +3261,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3277,7 +3277,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3298,7 +3298,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3319,7 +3319,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3340,7 +3340,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3361,7 +3361,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3382,7 +3382,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3403,7 +3403,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3419,7 +3419,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3435,7 +3435,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3456,7 +3456,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3472,7 +3472,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3493,7 +3493,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ 
Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3514,7 +3514,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3535,7 +3535,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3556,7 +3556,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3577,7 +3577,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3598,7 +3598,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3619,7 +3619,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3640,7 +3640,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3661,7 +3661,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -3682,7 +3682,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3698,7 +3698,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3714,7 +3714,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3730,7 +3730,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3746,7 +3746,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3762,7 +3762,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3778,7 +3778,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3794,7 +3794,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, 
version=0, @@ -3810,7 +3810,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3826,7 +3826,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3842,7 +3842,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3858,7 +3858,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3874,7 +3874,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3890,7 +3890,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3906,7 +3906,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3922,7 +3922,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3938,7 +3938,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3954,7 +3954,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3970,7 +3970,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -3986,7 +3986,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4002,7 +4002,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4018,7 +4018,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4034,7 +4034,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4050,7 +4050,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4066,7 +4066,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4082,7 +4082,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4098,7 +4098,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4114,7 +4114,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4130,7 +4130,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4146,7 +4146,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4162,7 +4162,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4178,7 +4178,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4194,7 +4194,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4210,7 +4210,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4226,7 +4226,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4242,7 +4242,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4258,7 +4258,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4274,7 +4274,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4290,7 +4290,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4306,7 +4306,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4322,7 +4322,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4338,7 +4338,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4354,7 +4354,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4370,7 +4370,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4386,7 +4386,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4402,7 +4402,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4418,7 +4418,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4434,7 +4434,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4450,7 +4450,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4466,7 +4466,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4482,7 +4482,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, 
version=0, @@ -4498,7 +4498,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4514,7 +4514,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4530,7 +4530,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4546,7 +4546,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4562,7 +4562,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4578,7 +4578,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4594,7 +4594,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4610,7 +4610,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4626,7 +4626,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4642,7 +4642,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4658,7 +4658,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4674,7 +4674,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4690,7 +4690,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4706,7 +4706,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4722,7 +4722,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4738,7 +4738,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4754,7 +4754,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4770,7 +4770,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4786,7 +4786,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4802,7 +4802,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4818,7 +4818,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4834,7 +4834,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4850,7 +4850,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4866,7 +4866,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4882,7 +4882,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4898,7 +4898,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4914,7 +4914,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4930,7 +4930,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4946,7 +4946,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, 
Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4962,7 +4962,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4978,7 +4978,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -4994,7 +4994,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5010,7 +5010,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5026,7 +5026,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5042,7 +5042,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5058,7 +5058,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5074,7 +5074,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5090,7 +5090,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5106,7 +5106,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5122,7 +5122,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5138,7 +5138,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5154,7 +5154,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5170,7 +5170,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, 
Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5186,7 +5186,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5202,7 +5202,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5218,7 +5218,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5234,7 +5234,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5250,7 +5250,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5266,7 +5266,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5282,7 +5282,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5298,7 +5298,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5314,7 +5314,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5330,7 +5330,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5346,7 +5346,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5362,7 +5362,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5378,7 +5378,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5394,7 +5394,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, 
version=0, @@ -5410,7 +5410,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5426,7 +5426,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5442,7 +5442,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5458,7 +5458,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5474,7 +5474,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5490,7 +5490,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5506,7 +5506,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5522,7 +5522,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5538,7 +5538,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5554,7 +5554,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5570,7 +5570,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5586,7 +5586,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5602,7 +5602,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5618,7 +5618,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5634,7 +5634,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5650,7 +5650,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5666,7 +5666,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5682,7 +5682,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5698,7 +5698,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5714,7 +5714,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5730,7 +5730,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5746,7 +5746,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5762,7 +5762,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5778,7 +5778,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5794,7 +5794,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5810,7 +5810,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5826,7 +5826,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5842,7 +5842,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5858,7 +5858,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5874,7 +5874,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5890,7 +5890,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5906,7 +5906,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5922,7 +5922,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -5943,7 +5943,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -5964,7 +5964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5980,7 +5980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -5996,7 +5996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6012,7 +6012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6028,7 +6028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6044,7 +6044,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6060,7 +6060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6076,7 +6076,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6099,7 +6099,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6122,7 +6122,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6145,7 +6145,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6168,7 +6168,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ 
Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6191,7 +6191,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6214,7 +6214,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6237,7 +6237,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6260,7 +6260,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6283,7 +6283,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6299,7 +6299,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.bleu], + metrics=[Metrics.rouge_t5, Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6315,7 +6315,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6331,7 +6331,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6347,7 +6347,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -6368,7 +6368,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6384,7 +6384,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.rouge_t5, Metrics.bleu, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.bleu, Metrics.perfect_exact_match], stop_sequence=[".", ";", "!", "?"], trust_dataset=True, version=0, @@ -6400,7 +6400,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6416,7 +6416,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6432,7 +6432,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6448,7 +6448,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6464,7 +6464,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], 
+ metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6480,7 +6480,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6496,7 +6496,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6512,7 +6512,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6528,7 +6528,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6544,7 +6544,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6560,7 +6560,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6576,7 +6576,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6592,7 +6592,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6608,7 +6608,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6624,7 +6624,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6640,7 +6640,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6656,7 +6656,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6672,7 +6672,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.copyright], + metrics=[Metrics.copyright], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6695,7 +6695,7 @@ stop_sequence=["\n", "Question:", "question:"], generation_size=100, version=1, - metric=( + metrics=( Metrics.prefix_quasi_exact_match, Metrics.f1_score_quasi, ), @@ -6711,7 +6711,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.perfect_exact_match, Metrics.f1_score], + metrics=[Metrics.perfect_exact_match, Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6727,7 +6727,7 @@ few_shots_split=None, few_shots_select=None, generation_size=128, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -6750,7 +6750,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ 
-6766,7 +6766,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6782,7 +6782,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6798,7 +6798,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6814,7 +6814,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6830,7 +6830,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6846,7 +6846,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6862,7 +6862,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6878,7 +6878,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6894,7 +6894,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6931,7 +6931,7 @@ few_shots_split="train", generation_size=250, stop_sequence=["Question:", "question:", "\n"], - metric=( + metrics=( Metrics.prefix_quasi_exact_match, Metrics.f1_score_quasi, ), @@ -6948,7 +6948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.exact_match], + metrics=[Metrics.exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6964,7 +6964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.exact_match], + metrics=[Metrics.exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6980,7 +6980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.exact_match], + metrics=[Metrics.exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -6996,7 +6996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7012,7 +7012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7028,7 +7028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7044,7 +7044,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7060,7 +7060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7076,7 +7076,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7092,7 +7092,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7108,7 +7108,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7124,7 +7124,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7140,7 +7140,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7161,7 +7161,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7182,7 +7182,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7203,7 +7203,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7224,7 +7224,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7245,7 +7245,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7266,7 +7266,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7287,7 +7287,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7308,7 +7308,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7329,7 +7329,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7350,7 +7350,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7371,7 +7371,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7392,7 +7392,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7413,7 +7413,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7434,7 +7434,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -7455,7 +7455,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7471,7 +7471,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7487,7 +7487,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7503,7 +7503,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7519,7 +7519,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7535,7 +7535,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7551,7 +7551,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7567,7 +7567,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7583,7 +7583,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7599,7 +7599,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.bleurt], + metrics=[Metrics.bleu, Metrics.bleurt], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7615,7 +7615,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7631,7 +7631,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7647,7 +7647,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5], + metrics=[Metrics.bleu, Metrics.rouge_t5], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7663,7 +7663,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, 
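Reviewer note: every hunk in this stretch of the patch makes the same mechanical change, renaming the LightevalTaskConfig keyword argument metric= to metrics= while leaving the metric lists themselves untouched. As a quick orientation, the following is a minimal sketch of what a task config looks like after the patch; the task and dataset names are hypothetical and only the metrics= keyword reflects the actual change being applied here.

# Minimal sketch (hypothetical task/dataset names, placeholder prompt function);
# only the `metrics=` keyword mirrors the rename applied throughout these hunks.
from lighteval.metrics.metrics import Metrics
from lighteval.tasks.lighteval_task import LightevalTaskConfig

example_task = LightevalTaskConfig(
    name="example_task",                       # hypothetical task name
    prompt_function=lambda line, task_name=None: None,  # stand-in; a real prompt fn returns a Doc
    hf_repo="org/example_dataset",             # hypothetical dataset repo
    hf_subset="default",
    hf_avail_splits=["test"],
    evaluation_splits=["test"],
    few_shots_split=None,
    few_shots_select=None,
    generation_size=1,
    metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm],  # was: metric=[...]
    stop_sequence=["\n"],
    trust_dataset=True,
    version=0,
)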
@@ -7679,7 +7679,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7695,7 +7695,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7711,7 +7711,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.mcc_single_token], + metrics=[Metrics.loglikelihood_acc, Metrics.mcc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7727,7 +7727,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7743,7 +7743,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7759,7 +7759,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_f1], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_f1], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7775,7 +7775,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7791,7 +7791,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_f1], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_f1], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7807,7 +7807,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7823,7 +7823,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7839,7 +7839,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7855,7 +7855,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7871,7 +7871,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7887,7 +7887,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7903,10 +7903,10 @@ few_shots_split=None, few_shots_select=None, generation_size=32768, # needed for reasoning models like R1 - metric=[ + metrics=[ Metrics.gpqa_instruct_pass_at_1_1n, - Metrics.gpqa_instruct_pass_at_1_4n, - 
Metrics.gpqa_instruct_pass_at_1_8n, + # Metrics.gpqa_instruct_pass_at_1_4n, + # Metrics.gpqa_instruct_pass_at_1_8n, ], stop_sequence=[], # no stop sequence, will use eos token trust_dataset=True, @@ -7923,7 +7923,7 @@ few_shots_split=None, few_shots_select=None, generation_size=32768, # needed for reasoning models like R1 - metric=[Metrics.gpqa_instruct_metric], + metrics=[Metrics.gpqa_instruct_metric], stop_sequence=[], # no stop sequence, will use eos token trust_dataset=True, version=0, @@ -7939,7 +7939,7 @@ few_shots_split=None, few_shots_select=None, generation_size=32768, # needed for reasoning models like R1 - metric=[Metrics.gpqa_instruct_metric], + metrics=[Metrics.gpqa_instruct_metric], stop_sequence=[], # no stop sequence, will use eos token trust_dataset=True, version=0, @@ -7955,7 +7955,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7971,7 +7971,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.expr_gold_metric], + metrics=[Metrics.expr_gold_metric], stop_sequence=None, trust_dataset=True, version=0, @@ -7987,8 +7987,8 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=256, - metric=[Metrics.quasi_exact_match_gsm8k], - stop_sequence=["Question:"], + metrics=[Metrics.quasi_exact_match_gsm8k], + stop_sequence=[], trust_dataset=True, version=0, ) @@ -8003,7 +8003,7 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=256, - metric=[ + metrics=[ Metrics.expr_gold_metric, ], stop_sequence=["Question:"], @@ -8021,7 +8021,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8037,7 +8037,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8053,7 +8053,7 @@ few_shots_split=None, few_shots_select="random_sampling_from_train", generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8069,7 +8069,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8090,7 +8090,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8106,7 +8106,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8122,7 +8122,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8138,7 +8138,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8154,7 +8154,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8170,7 +8170,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8186,7 +8186,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8202,7 +8202,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8218,7 +8218,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8241,7 +8241,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8264,7 +8264,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8280,7 +8280,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8296,7 +8296,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8312,7 +8312,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8333,7 +8333,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8354,7 +8354,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8375,7 +8375,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8396,7 +8396,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8417,7 +8417,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -8438,7 +8438,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8454,7 +8454,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + 
metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8470,7 +8470,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8486,7 +8486,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8502,7 +8502,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8518,7 +8518,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8534,7 +8534,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8550,7 +8550,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8566,7 +8566,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8582,7 +8582,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8598,7 +8598,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8614,7 +8614,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8630,7 +8630,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8646,7 +8646,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8662,7 +8662,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8678,7 +8678,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8699,7 +8699,7 @@ 
few_shots_split="train", generation_size=250, stop_sequence=["\n", "Question:", "question:"], - metric=( + metrics=( Metrics.prefix_quasi_exact_match, Metrics.f1_score_quasi, ), @@ -8715,7 +8715,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8731,7 +8731,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8747,7 +8747,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8763,7 +8763,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8779,7 +8779,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8795,7 +8795,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8811,7 +8811,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8827,7 +8827,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8843,7 +8843,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8859,7 +8859,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8875,7 +8875,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8891,7 +8891,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8907,7 +8907,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[Metrics.target_perplexity], + metrics=[Metrics.target_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8923,7 +8923,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8939,7 +8939,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -8955,7 +8955,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=1024, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -8978,7 +8978,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -9001,7 +9001,7 @@ few_shots_split=None, few_shots_select=None, generation_size=256, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -9024,7 +9024,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[ + metrics=[ Metrics.loglikelihood_acc, Metrics.exact_match, Metrics.quasi_exact_match, @@ -9046,7 +9046,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9068,7 +9068,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9090,7 +9090,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9112,7 +9112,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9134,7 +9134,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9156,7 +9156,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9178,7 +9178,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9200,7 +9200,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9222,7 +9222,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9244,7 +9244,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9266,7 +9266,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9288,7 +9288,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9310,7 +9310,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9332,7 +9332,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9348,7 +9348,7 @@ few_shots_split=None, few_shots_select=None, generation_size=430, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9370,7 +9370,7 @@ few_shots_split=None, few_shots_select=None, generation_size=788, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, 
@@ -9392,7 +9392,7 @@ few_shots_split=None, few_shots_select=None, generation_size=338, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9414,7 +9414,7 @@ few_shots_split=None, few_shots_select=None, generation_size=274, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9436,7 +9436,7 @@ few_shots_split=None, few_shots_select=None, generation_size=274, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9458,7 +9458,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9480,7 +9480,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9502,7 +9502,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9524,7 +9524,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9546,7 +9546,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9568,7 +9568,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -9590,7 +9590,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9606,7 +9606,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=None, trust_dataset=True, version=0, @@ -9622,7 +9622,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9638,7 +9638,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9654,7 +9654,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9670,7 +9670,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9686,7 +9686,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9702,7 +9702,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9718,7 +9718,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - 
metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -9739,7 +9739,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -9760,7 +9760,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -9781,7 +9781,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -9802,7 +9802,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -9823,7 +9823,7 @@ few_shots_split=None, few_shots_select=None, generation_size=32768, - metric=[ + metrics=[ Metrics.math_pass_at_1_1n, Metrics.math_pass_at_1_4n, ], @@ -9840,7 +9840,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8192, - metric=[Metrics.g_pass_at_16_latex_gold], + metrics=[Metrics.g_pass_at_16_latex_gold], version=1, ) math_algebra_lighteval = LightevalTaskConfig( @@ -9854,7 +9854,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9870,7 +9870,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9886,7 +9886,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9902,7 +9902,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9918,7 +9918,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9934,7 +9934,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9950,7 +9950,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=1, @@ -9966,7 +9966,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9982,7 +9982,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - 
metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9998,7 +9998,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10014,7 +10014,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10030,7 +10030,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10046,7 +10046,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10062,7 +10062,7 @@ few_shots_split=None, few_shots_select=None, generation_size=2048, - metric=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], + metrics=[Metrics.quasi_exact_match_math, Metrics.maj_at_4_math], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10078,7 +10078,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10094,7 +10094,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10110,7 +10110,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10126,7 +10126,7 @@ few_shots_split=None, few_shots_select=None, generation_size=128, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -10149,7 +10149,7 @@ few_shots_split=None, few_shots_select=None, generation_size=128, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -10172,7 +10172,7 @@ few_shots_split=None, few_shots_select=None, generation_size=128, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -10195,7 +10195,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.loglikelihood_acc, Metrics.exact_match, Metrics.quasi_exact_match, @@ -10217,7 +10217,7 @@ few_shots_split=None, few_shots_select=None, generation_size=512, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -10240,7 +10240,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.loglikelihood_acc, Metrics.exact_match, Metrics.quasi_exact_match, @@ -10262,7 +10262,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10278,7 +10278,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10294,7 +10294,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "Question="], trust_dataset=True, version=0, @@ -10310,7 +10310,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "Pregunta="], trust_dataset=True, version=0, @@ -10326,7 +10326,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "Question="], trust_dataset=True, version=0, @@ -10342,7 +10342,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "Frage="], trust_dataset=True, version=0, @@ -10358,7 +10358,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u0417\u0430\u0434\u0430\u0447\u0430="], trust_dataset=True, version=0, @@ -10374,7 +10374,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u95ee\u9898="], trust_dataset=True, version=0, @@ -10390,7 +10390,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u554f\u984c="], trust_dataset=True, version=0, @@ -10406,7 +10406,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u0e42\u0e08\u0e17\u0e22\u0e4c="], trust_dataset=True, version=0, @@ -10422,7 +10422,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "Swali="], trust_dataset=True, version=0, @@ -10438,7 +10438,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8="], trust_dataset=True, version=0, @@ -10454,7 +10454,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n", "=", "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28="], trust_dataset=True, version=0, @@ -10470,7 +10470,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.rouge_t5], + metrics=[Metrics.loglikelihood_acc, Metrics.rouge_t5], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10486,7 +10486,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10502,7 +10502,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10518,7 +10518,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10534,7 +10534,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10550,7 +10550,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10571,7 +10571,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10587,7 +10587,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10603,7 +10603,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10624,7 +10624,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10640,7 +10640,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10656,7 +10656,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10677,7 +10677,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10693,7 +10693,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10709,7 +10709,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10730,7 +10730,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10746,7 +10746,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10762,7 +10762,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10783,7 +10783,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10799,7 +10799,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10815,7 +10815,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10836,7 +10836,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10852,7 +10852,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10868,7 +10868,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10889,7 +10889,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10905,7 +10905,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10921,7 +10921,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10942,7 +10942,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10958,7 +10958,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10974,7 +10974,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -10995,7 +10995,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11011,7 +11011,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11027,7 +11027,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11048,7 +11048,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11064,7 +11064,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11080,7 +11080,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11101,7 +11101,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11117,7 +11117,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11133,7 +11133,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11154,7 +11154,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11170,7 +11170,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11186,7 +11186,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11207,7 +11207,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11223,7 +11223,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11239,7 +11239,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11260,7 +11260,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11276,7 +11276,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11292,7 +11292,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11313,7 +11313,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11329,7 +11329,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11345,7 +11345,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11366,7 +11366,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11382,7 +11382,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11398,7 +11398,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11419,7 +11419,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11435,7 +11435,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11451,7 +11451,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11472,7 +11472,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11488,7 +11488,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11504,7 +11504,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11525,7 +11525,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11541,7 +11541,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11557,7 +11557,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11578,7 +11578,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11594,7 +11594,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11610,7 +11610,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11631,7 +11631,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11647,7 +11647,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11663,7 +11663,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11684,7 +11684,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11700,7 +11700,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11716,7 +11716,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11737,7 +11737,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11753,7 +11753,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11769,7 +11769,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11790,7 +11790,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11806,7 +11806,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11822,7 +11822,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11843,7 +11843,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11859,7 +11859,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11875,7 +11875,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11896,7 +11896,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11912,7 +11912,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11928,7 +11928,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -11949,7 +11949,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11965,7 +11965,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11981,7 +11981,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12002,7 +12002,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12018,7 +12018,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12034,7 +12034,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12055,7 +12055,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12071,7 +12071,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12087,7 +12087,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12108,7 +12108,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12124,7 +12124,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12140,7 +12140,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12161,7 +12161,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12177,7 +12177,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12193,7 +12193,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12214,7 +12214,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12230,7 +12230,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12246,7 +12246,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12267,7 +12267,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12283,7 +12283,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12299,7 +12299,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12320,7 +12320,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12336,7 +12336,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12352,7 +12352,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12373,7 +12373,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12389,7 +12389,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12405,7 +12405,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12426,7 +12426,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12442,7 +12442,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12458,7 +12458,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12479,7 +12479,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12495,7 +12495,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12511,7 +12511,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12532,7 +12532,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12548,7 +12548,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12564,7 +12564,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12585,7 +12585,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12601,7 +12601,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12617,7 +12617,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12638,7 +12638,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12654,7 +12654,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12670,7 +12670,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12691,7 +12691,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12707,7 +12707,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12723,7 +12723,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12744,7 +12744,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12760,7 +12760,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12776,7 +12776,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12797,7 +12797,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12813,7 +12813,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12829,7 +12829,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12850,7 +12850,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12866,7 +12866,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12882,7 +12882,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12903,7 +12903,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12919,7 +12919,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12935,7 +12935,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -12956,7 +12956,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12972,7 +12972,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12988,7 +12988,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13009,7 +13009,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13025,7 +13025,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13041,7 +13041,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13062,7 +13062,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13078,7 +13078,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13094,7 +13094,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13115,7 +13115,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13131,7 +13131,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13147,7 +13147,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13168,7 +13168,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13184,7 +13184,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13200,7 +13200,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13221,7 +13221,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13237,7 +13237,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13253,7 +13253,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13274,7 +13274,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13290,7 +13290,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13306,7 +13306,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13327,7 +13327,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13343,7 +13343,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13359,7 +13359,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13380,7 +13380,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13396,7 +13396,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13412,7 +13412,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13433,7 +13433,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13449,7 +13449,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13465,7 +13465,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13486,7 +13486,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13502,7 +13502,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13518,7 +13518,7 @@ few_shots_split="dev", few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -13539,7 +13539,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13555,7 +13555,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13571,7 +13571,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13587,7 +13587,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13603,7 +13603,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13619,7 +13619,7 @@ few_shots_split=None, few_shots_select=None, generation_size=200, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13635,7 +13635,7 @@ few_shots_split=None, few_shots_select=None, generation_size=200, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13651,7 +13651,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=200, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13667,7 +13667,7 @@ few_shots_split=None, few_shots_select=None, generation_size=200, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13683,7 +13683,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13699,7 +13699,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13715,7 +13715,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13731,7 +13731,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13747,7 +13747,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13763,7 +13763,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.recall_at_1, Metrics.recall_at_2, Metrics.mrr], + metrics=[Metrics.recall_at_1, Metrics.recall_at_2, Metrics.mrr], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13779,7 +13779,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.recall_at_1, Metrics.recall_at_2, Metrics.mrr], + metrics=[Metrics.recall_at_1, Metrics.recall_at_2, Metrics.mrr], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13795,7 +13795,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score, @@ -13818,7 +13818,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5], + metrics=[Metrics.bleu, Metrics.rouge_t5], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13836,7 +13836,7 @@ few_shots_split="few_shot", generation_size=250, stop_sequence=["\n", "Question:", "question:"], - metric=( + metrics=( Metrics.prefix_quasi_exact_match, Metrics.f1_score_quasi, ), @@ -13852,7 +13852,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13868,7 +13868,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13884,7 +13884,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13900,7 +13900,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13916,7 +13916,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13932,7 +13932,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13948,7 +13948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13964,7 +13964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13980,7 +13980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13996,7 +13996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14012,7 +14012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.quasi_exact_match], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14028,7 +14028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14044,7 +14044,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14060,7 +14060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14081,7 +14081,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14097,7 +14097,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14113,7 +14113,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14129,7 +14129,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14145,7 +14145,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=100, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=None, trust_dataset=True, version=0, @@ -14161,7 +14161,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14177,7 +14177,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14193,7 +14193,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14209,7 +14209,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14225,7 +14225,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14241,7 +14241,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14257,7 +14257,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14273,7 +14273,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14289,7 +14289,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14310,7 +14310,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14326,7 +14326,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.f1_score], + metrics=[Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14342,7 +14342,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14358,7 +14358,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14374,7 +14374,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14390,7 +14390,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, 
Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14411,7 +14411,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14427,7 +14427,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14443,7 +14443,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14459,7 +14459,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleurt, Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleurt, Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14475,7 +14475,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.f1_score_quasi], + metrics=[Metrics.f1_score_quasi], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14491,7 +14491,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14507,7 +14507,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score], + metrics=[Metrics.exact_match, Metrics.quasi_exact_match, Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14523,7 +14523,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14539,7 +14539,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14555,7 +14555,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14578,7 +14578,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14601,7 +14601,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14624,7 +14624,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14647,7 +14647,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14670,7 +14670,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, 
Metrics.prefix_exact_match, @@ -14693,7 +14693,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14716,7 +14716,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14739,7 +14739,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14762,7 +14762,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14785,7 +14785,7 @@ few_shots_split=None, few_shots_select=None, generation_size=30, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -14808,7 +14808,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14824,7 +14824,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.prediction_perplexity], + metrics=[Metrics.prediction_perplexity], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14840,7 +14840,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14856,7 +14856,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14872,7 +14872,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], + metrics=[Metrics.rouge_t5, Metrics.bleu, Metrics.loglikelihood_acc, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14888,7 +14888,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14904,7 +14904,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14920,7 +14920,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14936,7 +14936,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14952,7 +14952,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14968,7 +14968,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, 
Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14984,7 +14984,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15000,7 +15000,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15016,7 +15016,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15032,7 +15032,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15048,7 +15048,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15064,7 +15064,7 @@ few_shots_split="few_shot", few_shots_select=None, generation_size=2048, - metric=[Metrics.simpleqa_judge], + metrics=[Metrics.simpleqa_judge], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15080,7 +15080,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15096,7 +15096,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15112,7 +15112,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15128,7 +15128,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.perfect_exact_match], + metrics=[Metrics.bleu, Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15144,7 +15144,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15160,7 +15160,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15176,7 +15176,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -15197,7 +15197,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15213,7 +15213,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], 
trust_dataset=True, version=0, @@ -15229,7 +15229,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.f1_score_macro], + metrics=[Metrics.f1_score_macro], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15245,7 +15245,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15268,7 +15268,7 @@ few_shots_split="train", stop_sequence=["\n", "Question:", "question:"], generation_size=200, - metric=( + metrics=( Metrics.prefix_quasi_exact_match, Metrics.f1_score_quasi, ), @@ -15284,7 +15284,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15300,7 +15300,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15316,7 +15316,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15332,7 +15332,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15348,7 +15348,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15364,7 +15364,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15380,7 +15380,7 @@ few_shots_split=None, few_shots_select=None, generation_size=128, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -15403,7 +15403,7 @@ few_shots_split=None, few_shots_select=None, generation_size=64, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -15426,7 +15426,7 @@ few_shots_split=None, few_shots_select=None, generation_size=64, - metric=[ + metrics=[ Metrics.rouge1, Metrics.rouge2, Metrics.rougeL, @@ -15449,7 +15449,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15465,7 +15465,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.multi_f1_numeric], + metrics=[Metrics.loglikelihood_acc, Metrics.multi_f1_numeric], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15481,7 +15481,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15497,7 +15497,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15513,7 +15513,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - 
metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15529,7 +15529,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15545,7 +15545,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15561,7 +15561,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15577,7 +15577,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm_nospace], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15593,7 +15593,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15609,7 +15609,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15625,7 +15625,7 @@ few_shots_split=None, few_shots_select=None, generation_size=50, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -15646,7 +15646,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.f1_score], + metrics=[Metrics.exact_match, Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15662,7 +15662,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.exact_match, Metrics.f1_score], + metrics=[Metrics.exact_match, Metrics.f1_score], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15678,7 +15678,7 @@ few_shots_split=None, few_shots_select=None, generation_size=50, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -15699,7 +15699,7 @@ few_shots_split=None, few_shots_select=None, generation_size=50, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -15720,7 +15720,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5], + metrics=[Metrics.bleu, Metrics.rouge_t5], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15736,7 +15736,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15752,7 +15752,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15768,7 +15768,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ 
-15784,7 +15784,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15800,7 +15800,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15816,7 +15816,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15832,7 +15832,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15848,7 +15848,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15864,7 +15864,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15880,7 +15880,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15896,7 +15896,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15912,7 +15912,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15928,7 +15928,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15944,7 +15944,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15960,7 +15960,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, 
Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15976,7 +15976,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15992,7 +15992,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16008,7 +16008,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16024,7 +16024,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16040,7 +16040,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16056,7 +16056,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16072,7 +16072,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16088,7 +16088,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16104,7 +16104,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16120,7 +16120,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16136,7 +16136,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16152,7 +16152,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16168,7 +16168,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16184,7 +16184,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16200,7 +16200,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16216,7 +16216,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16232,7 +16232,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16248,7 +16248,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16264,7 +16264,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16280,7 +16280,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16296,7 +16296,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16312,7 +16312,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16328,7 +16328,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16344,7 +16344,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16360,7 +16360,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16376,7 +16376,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16392,7 +16392,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16408,7 +16408,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16424,7 +16424,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16440,7 +16440,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], + metrics=[Metrics.loglikelihood_acc, Metrics.loglikelihood_acc_norm], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16456,7 +16456,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc, Metrics.bleurt], + metrics=[Metrics.bleu, Metrics.rouge_t5, Metrics.loglikelihood_acc, Metrics.bleurt], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16472,7 +16472,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16488,7 +16488,7 @@ few_shots_split=None, few_shots_select=None, generation_size=20, - metric=[Metrics.quasi_exact_match_triviaqa], + metrics=[Metrics.quasi_exact_match_triviaqa], stop_sequence=["\n", ".", ","], trust_dataset=True, version=0, @@ -16504,7 +16504,7 @@ few_shots_split=None, few_shots_select=None, generation_size=200, - metric=[Metrics.bleu, Metrics.rouge_t5], + metrics=[Metrics.bleu, Metrics.rouge_t5], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16520,7 +16520,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.truthfulqa_mc_metrics], + metrics=[Metrics.truthfulqa_mc_metrics], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16536,7 +16536,7 
@@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[ + metrics=[ Metrics.loglikelihood_acc, Metrics.exact_match, Metrics.quasi_exact_match, @@ -16558,7 +16558,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16574,7 +16574,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16590,7 +16590,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16606,7 +16606,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16622,7 +16622,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16638,7 +16638,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16654,7 +16654,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16670,7 +16670,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16686,7 +16686,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16702,7 +16702,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16718,7 +16718,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16734,7 +16734,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16750,7 +16750,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16766,7 +16766,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.acc_golds_likelihood], + metrics=[Metrics.acc_golds_likelihood], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16782,7 +16782,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - 
metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16798,7 +16798,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -16814,7 +16814,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16835,7 +16835,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16856,7 +16856,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16877,7 +16877,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16898,7 +16898,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16919,7 +16919,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16940,7 +16940,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16961,7 +16961,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -16982,7 +16982,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17003,7 +17003,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17024,7 +17024,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17045,7 +17045,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17066,7 +17066,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17087,7 +17087,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17108,7 +17108,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17129,7 +17129,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17150,7 +17150,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17171,7 +17171,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17192,7 +17192,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17213,7 +17213,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17234,7 +17234,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17255,7 +17255,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17276,7 +17276,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17297,7 +17297,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17318,7 +17318,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17339,7 +17339,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17360,7 +17360,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17381,7 +17381,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17402,7 +17402,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17423,7 +17423,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17444,7 +17444,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17465,7 +17465,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17486,7 +17486,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17507,7 +17507,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17528,7 +17528,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17549,7 +17549,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17570,7 +17570,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17591,7 +17591,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17612,7 +17612,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17633,7 +17633,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17654,7 +17654,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17675,7 +17675,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17696,7 +17696,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17717,7 +17717,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17738,7 +17738,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17759,7 +17759,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17780,7 +17780,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17801,7 +17801,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17822,7 +17822,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17843,7 +17843,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17864,7 +17864,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17885,7 +17885,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17906,7 +17906,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17927,7 +17927,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17948,7 +17948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17969,7 +17969,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -17990,7 +17990,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18011,7 +18011,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18032,7 +18032,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18053,7 +18053,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18074,7 +18074,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18095,7 +18095,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18116,7 +18116,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18137,7 +18137,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18158,7 +18158,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18179,7 +18179,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18200,7 +18200,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18221,7 +18221,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18242,7 +18242,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18263,7 +18263,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18284,7 +18284,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18305,7 +18305,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18326,7 +18326,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18347,7 +18347,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18368,7 +18368,7 @@ 
few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18389,7 +18389,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18410,7 +18410,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18431,7 +18431,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18452,7 +18452,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18473,7 +18473,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18494,7 +18494,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18515,7 +18515,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18536,7 +18536,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18557,7 +18557,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18578,7 +18578,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18599,7 +18599,7 @@ few_shots_split=None, few_shots_select=None, generation_size=8, - metric=[ + metrics=[ Metrics.exact_match, Metrics.quasi_exact_match, Metrics.prefix_exact_match, @@ -18620,7 +18620,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18636,7 +18636,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18652,7 +18652,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], + metrics=[Metrics.word_perplexity, Metrics.byte_perplexity, Metrics.bits_per_byte], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18668,7 +18668,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18684,7 +18684,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18700,7 +18700,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18716,7 +18716,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18732,7 +18732,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18748,7 +18748,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18764,7 +18764,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18780,7 +18780,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18796,7 +18796,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18812,7 +18812,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18828,7 +18828,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18844,7 +18844,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18860,7 +18860,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18876,7 +18876,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18892,7 +18892,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18908,7 +18908,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18924,7 +18924,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18940,7 +18940,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18956,7 +18956,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18972,7 +18972,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -18988,7 +18988,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19004,7 +19004,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19020,7 +19020,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19036,7 +19036,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19052,7 +19052,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19068,7 +19068,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19084,7 +19084,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19100,7 +19100,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19116,7 +19116,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19132,7 +19132,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19148,7 +19148,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, 
Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19164,7 +19164,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19180,7 +19180,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19196,7 +19196,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19212,7 +19212,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19228,7 +19228,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19244,7 +19244,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19260,7 +19260,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19276,7 +19276,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19292,7 +19292,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19308,7 +19308,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19324,7 +19324,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19340,7 +19340,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19356,7 +19356,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19372,7 +19372,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, 
Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19388,7 +19388,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19404,7 +19404,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19420,7 +19420,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19436,7 +19436,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19452,7 +19452,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19468,7 +19468,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19484,7 +19484,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19500,7 +19500,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19516,7 +19516,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19532,7 +19532,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19548,7 +19548,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19564,7 +19564,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19580,7 +19580,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19596,7 +19596,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, 
@@ -19612,7 +19612,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19628,7 +19628,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19644,7 +19644,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19660,7 +19660,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19676,7 +19676,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19692,7 +19692,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19708,7 +19708,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19724,7 +19724,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19740,7 +19740,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19756,7 +19756,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19772,7 +19772,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19788,7 +19788,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19804,7 +19804,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19820,7 +19820,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19836,7 +19836,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], 
trust_dataset=True, version=0, @@ -19852,7 +19852,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19868,7 +19868,7 @@ few_shots_split=None, few_shots_select=None, generation_size=100, - metric=[Metrics.bleu], + metrics=[Metrics.bleu], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19884,7 +19884,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19900,7 +19900,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19916,7 +19916,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19932,7 +19932,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19948,7 +19948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19964,7 +19964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19980,7 +19980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -19996,7 +19996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20012,7 +20012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20028,7 +20028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20044,7 +20044,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20060,7 +20060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20076,7 +20076,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + 
metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20092,7 +20092,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20108,7 +20108,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20124,7 +20124,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20140,7 +20140,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20156,7 +20156,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20172,7 +20172,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20188,7 +20188,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20204,7 +20204,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20220,7 +20220,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20236,7 +20236,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20252,7 +20252,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20268,7 +20268,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20284,7 +20284,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20300,7 +20300,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20316,7 +20316,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20332,7 +20332,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20348,7 +20348,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20364,7 +20364,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20380,7 +20380,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20396,7 +20396,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20412,7 +20412,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20428,7 +20428,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20444,7 +20444,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20460,7 +20460,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20476,7 +20476,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20492,7 +20492,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20508,7 +20508,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20524,7 +20524,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20540,7 
+20540,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20556,7 +20556,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20572,7 +20572,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20588,7 +20588,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20604,7 +20604,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20620,7 +20620,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20636,7 +20636,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20652,7 +20652,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20668,7 +20668,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20684,7 +20684,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20700,7 +20700,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20716,7 +20716,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20732,7 +20732,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20748,7 +20748,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20764,7 +20764,7 @@ few_shots_split=None, few_shots_select=None, 
generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20780,7 +20780,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20796,7 +20796,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20812,7 +20812,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20828,7 +20828,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20844,7 +20844,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20860,7 +20860,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20876,7 +20876,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20892,7 +20892,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20908,7 +20908,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20924,7 +20924,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20940,7 +20940,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20956,7 +20956,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20972,7 +20972,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -20988,7 +20988,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, 
Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21004,7 +21004,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21020,7 +21020,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21036,7 +21036,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21052,7 +21052,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21068,7 +21068,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21084,7 +21084,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21100,7 +21100,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21116,7 +21116,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21132,7 +21132,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21148,7 +21148,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21164,7 +21164,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21180,7 +21180,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21196,7 +21196,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21212,7 +21212,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], 
stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21228,7 +21228,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21244,7 +21244,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21260,7 +21260,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21276,7 +21276,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21292,7 +21292,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21308,7 +21308,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21324,7 +21324,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21340,7 +21340,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21356,7 +21356,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21372,7 +21372,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21388,7 +21388,7 @@ few_shots_split=None, few_shots_select=None, generation_size=None, - metric=[Metrics.bleu, Metrics.chrf, Metrics.ter], + metrics=[Metrics.bleu, Metrics.chrf, Metrics.ter], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21404,7 +21404,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21420,7 +21420,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.perfect_exact_match], + metrics=[Metrics.perfect_exact_match], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21436,7 +21436,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21452,7 +21452,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - 
metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21468,7 +21468,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21484,7 +21484,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21500,7 +21500,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21516,7 +21516,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21532,7 +21532,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21548,7 +21548,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21564,7 +21564,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21580,7 +21580,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21596,7 +21596,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21612,7 +21612,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21628,7 +21628,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21644,7 +21644,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21660,7 +21660,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21676,7 +21676,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21692,7 +21692,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21708,7 +21708,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + 
metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21724,7 +21724,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21740,7 +21740,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21756,7 +21756,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21772,7 +21772,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21788,7 +21788,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21804,7 +21804,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21820,7 +21820,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21836,7 +21836,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21852,7 +21852,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21868,7 +21868,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21884,7 +21884,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -21900,7 +21900,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metrics=[Metrics.loglikelihood_acc], stop_sequence=["\n"], trust_dataset=True, version=0, diff --git a/src/lighteval/tasks/extended/hle/main.py b/src/lighteval/tasks/extended/hle/main.py index 76a63c1ad..2c36607da 100644 --- a/src/lighteval/tasks/extended/hle/main.py +++ b/src/lighteval/tasks/extended/hle/main.py @@ -31,13 +31,9 @@ from lighteval.metrics.metrics import Metrics from lighteval.metrics.metrics_sample import JudgeLLM -from lighteval.metrics.utils.metric_utils import ( - CorpusLevelMetricGrouping, - MetricCategory, - MetricUseCase, -) +from lighteval.metrics.utils.metric_utils import CorpusLevelMetricGrouping from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod logger = logging.getLogger(__name__) @@ -195,7 +191,7 @@ def calib_err(confidence, correct, p="2", beta=100): def hle_text_only(line, 
task_name: str = None): if line["image"] not in [None, ""]: - return + return None return Doc( task_name=task_name, @@ -209,8 +205,7 @@ def hle_text_only(line, task_name: str = None): hle_metrics = CorpusLevelMetricGrouping( metric_name=["accuracy", "confidence_half_width", "calibration_error"], higher_is_better=dict.fromkeys(["accuracy", "confidence_half_width", "calibration_error"], True), - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMHLE().compute, corpus_level_fn=JudgeLLMHLE().compute_corpus, ) @@ -227,7 +222,7 @@ def hle_text_only(line, task_name: str = None): few_shots_split=None, few_shots_select=None, generation_size=8192, - metric=[Metrics.exact_match, Metrics.hle_metrics], + metrics=[Metrics.exact_match, Metrics.hle_metrics], stop_sequence=[], trust_dataset=True, version=0, diff --git a/src/lighteval/tasks/extended/ifeval/main.py b/src/lighteval/tasks/extended/ifeval/main.py index 2d9ecc231..79b283dbf 100644 --- a/src/lighteval/tasks/extended/ifeval/main.py +++ b/src/lighteval/tasks/extended/ifeval/main.py @@ -27,17 +27,16 @@ import lighteval.tasks.extended.ifeval.instructions_registry as instructions_registry from lighteval.metrics.metrics import Metrics from lighteval.metrics.utils.metric_utils import ( - MetricCategory, - MetricUseCase, SampleLevelMetricGrouping, ) +from lighteval.models.model_output import ModelResponse from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod from lighteval.utils.utils import remove_reasoning_tags # Very specific task where there are no precise outputs but instead we test if the format obeys rules -def ifeval_prompt(line, task_name: str = None): +def ifeval_prompt(line, task_name: str = ""): return Doc( task_name=task_name, query=line["prompt"], @@ -60,15 +59,15 @@ def ifeval_prompt(line, task_name: str = None): ] -def ifeval_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> dict: - response = predictions[0] +def ifeval_metric(doc: Doc, model_response: ModelResponse, **kwargs) -> dict: + response = model_response.text[0] # Remove the reasoning block to avoid false negatives: https://github.com/huggingface/lighteval/issues/790 response = remove_reasoning_tags(response, REASONING_TAG_PAIRS) # Strict instructions - instruction_list = formatted_doc.specific["instructions_id_list"] - all_kwargs = formatted_doc.specific["kwargs"] - prompt = formatted_doc.query + instruction_list = doc.specific["instructions_id_list"] + all_kwargs = doc.specific["kwargs"] + prompt = doc.query # Loose instructions r = response.split("\n") @@ -136,8 +135,7 @@ def agg_inst_level_acc(items): ifeval_metrics = SampleLevelMetricGrouping( metric_name=submetric_names, higher_is_better=dict.fromkeys(submetric_names, True), - category=MetricCategory.GENERATIVE, - use_case=MetricUseCase.ACCURACY, + category=SamplingMethod.GENERATIVE, sample_level_fn=ifeval_metric, corpus_level_fn={ "prompt_level_strict_acc": np.mean, @@ -154,7 +152,7 @@ def agg_inst_level_acc(items): suite=["extended"], hf_repo="google/IFEval", hf_subset="default", - metric=[ifeval_metrics], + metrics=[ifeval_metrics], hf_avail_splits=["train"], evaluation_splits=["train"], few_shots_split="train", diff --git a/src/lighteval/tasks/extended/lcb/main.py b/src/lighteval/tasks/extended/lcb/main.py index 5682467ac..66a6844c4 100644 --- a/src/lighteval/tasks/extended/lcb/main.py +++ 
b/src/lighteval/tasks/extended/lcb/main.py @@ -35,13 +35,14 @@ import numpy as np from aenum import extend_enum -from lighteval.metrics.metrics import MetricCategory, Metrics, MetricUseCase, SampleLevelMetric +from lighteval.metrics.metrics import Metrics, SampleLevelMetric from lighteval.tasks.extended.lcb.codegen_metrics import ( codegen_metrics, extract_code, translate_private_test_cases, ) from lighteval.tasks.lighteval_task import Doc, LightevalTaskConfig +from lighteval.tasks.requests import SamplingMethod def prepare_prompt(line: dict[str, Any]) -> str: @@ -104,8 +105,7 @@ def codegen_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> floa lcb_codegen_metric = SampleLevelMetric( metric_name="codegen_pass@1:16", # This is the way of informing the number of generations currently - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, + category=SamplingMethod.GENERATIVE, higher_is_better=True, sample_level_fn=codegen_metric, corpus_level_fn=np.mean, @@ -154,7 +154,7 @@ def codegen_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> floa hf_avail_splits=["test"], evaluation_splits=["test"], generation_size=32768, - metric=[Metrics.lcb_codegen_metric], + metrics=[Metrics.lcb_codegen_metric], stop_sequence=[], # no stop sequence, will use EOS token trust_dataset=True, version=0, diff --git a/src/lighteval/tasks/extended/mix_eval/main.py b/src/lighteval/tasks/extended/mix_eval/main.py index eaa58f2a5..0e108f90c 100644 --- a/src/lighteval/tasks/extended/mix_eval/main.py +++ b/src/lighteval/tasks/extended/mix_eval/main.py @@ -26,7 +26,7 @@ import numpy as np from lighteval.metrics.metrics_sample import JudgeLLMMixEval -from lighteval.metrics.utils.metric_utils import MetricCategory, MetricUseCase, SampleLevelMetricGrouping +from lighteval.metrics.utils.metric_utils import SampleLevelMetricGrouping from lighteval.tasks.extended.mix_eval.judge_prompts import ( flow_judge_for_freeform_template, flow_judge_for_multichoice_template, @@ -35,7 +35,7 @@ ) from lighteval.tasks.extended.mix_eval.prompts import construct_prompt_freeform, construct_prompt_multichoice from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod logger = logging.getLogger(__name__) @@ -104,8 +104,7 @@ def process_judge_response_freeform_gpt(x): llm_judge_mixeval_multichoice_flow_judge = SampleLevelMetricGrouping( metric_name=["llm_judge_mixeval_flow"], higher_is_better={"judge_score_flow": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMMixEval( judge_model_name="flowaicom/Flow-Judge-v0.1", template=flow_judge_for_multichoice_template, @@ -121,8 +120,7 @@ def process_judge_response_freeform_gpt(x): llm_judge_mixeval_multichoice_gpt_judge = SampleLevelMetricGrouping( metric_name=["llm_judge_mixeval_gpt3"], higher_is_better={"judge_score_gpt-3.5": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMMixEval( judge_model_name="gpt-3.5-turbo", template=gpt_judge_for_closeended_multiplechoice, @@ -143,8 +141,7 @@ def mean_dv_5(x): llm_judge_mixeval_freeform_flow_judge = SampleLevelMetricGrouping( metric_name=["llm_judge_mixeval_flow"], higher_is_better={"judge_score": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, + 
category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMMixEval( judge_model_name="flowaicom/Flow-Judge-v0.1", template=flow_judge_for_freeform_template, @@ -160,8 +157,7 @@ def mean_dv_5(x): llm_judge_mixeval_freeform_gpt_judge = SampleLevelMetricGrouping( metric_name=["llm_judge_mixeval_gpt3"], higher_is_better={"judge_score_gpt-3.5": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMMixEval( judge_model_name="gpt-3.5-turbo", template=gpt_judge_for_closeended_freeform, @@ -181,7 +177,7 @@ def mean_dv_5(x): suite=["extended"], hf_repo="MixEval/MixEval", hf_subset="MixEval", - metric=[llm_judge_mixeval_freeform_flow_judge, llm_judge_mixeval_freeform_gpt_judge], + metrics=[llm_judge_mixeval_freeform_flow_judge, llm_judge_mixeval_freeform_gpt_judge], hf_avail_splits=["free_form"], evaluation_splits=["free_form"], few_shots_split=None, @@ -198,7 +194,7 @@ def mean_dv_5(x): suite=["extended"], hf_repo="MixEval/MixEval", hf_subset="MixEval", - metric=[llm_judge_mixeval_multichoice_flow_judge, llm_judge_mixeval_multichoice_gpt_judge], + metrics=[llm_judge_mixeval_multichoice_flow_judge, llm_judge_mixeval_multichoice_gpt_judge], hf_avail_splits=["multiple_choice"], evaluation_splits=["multiple_choice"], few_shots_split=None, @@ -214,7 +210,7 @@ def mean_dv_5(x): suite=["extended"], hf_repo="MixEval/MixEval", hf_subset="MixEval_Hard", - metric=[llm_judge_mixeval_freeform_flow_judge, llm_judge_mixeval_freeform_gpt_judge], + metrics=[llm_judge_mixeval_freeform_flow_judge, llm_judge_mixeval_freeform_gpt_judge], hf_avail_splits=["free_form"], evaluation_splits=["free_form"], few_shots_split=None, @@ -231,7 +227,7 @@ def mean_dv_5(x): suite=["extended"], hf_repo="MixEval/MixEval", hf_subset="MixEval_Hard", - metric=[llm_judge_mixeval_multichoice_flow_judge, llm_judge_mixeval_multichoice_gpt_judge], + metrics=[llm_judge_mixeval_multichoice_flow_judge, llm_judge_mixeval_multichoice_gpt_judge], hf_avail_splits=["multiple_choice"], evaluation_splits=["multiple_choice"], few_shots_split=None, diff --git a/src/lighteval/tasks/extended/mt_bench/main.py b/src/lighteval/tasks/extended/mt_bench/main.py index 117e363dd..1756fb212 100644 --- a/src/lighteval/tasks/extended/mt_bench/main.py +++ b/src/lighteval/tasks/extended/mt_bench/main.py @@ -22,9 +22,9 @@ # ruff: noqa: F405, F403, F401, I001 from lighteval.tasks.lighteval_task import LightevalTaskConfig -from lighteval.tasks.requests import Doc +from lighteval.tasks.requests import Doc, SamplingMethod from lighteval.metrics.metrics_sample import JudgeLLMMTBench -from lighteval.metrics.utils.metric_utils import SampleLevelMetricGrouping, MetricCategory, MetricUseCase +from lighteval.metrics.utils.metric_utils import SampleLevelMetricGrouping from lighteval.tasks.extended.mt_bench.judge_prompt_templates import ( flow_judge_prompt_mt_bench_with_ref, flow_judge_prompt_mt_bench_without_ref, @@ -64,8 +64,7 @@ def flow_judge_mt_bench_prompt(question, answer, options, gold): llm_judge_mt_bench = SampleLevelMetricGrouping( metric_name=["judge_score_turn_1", "judge_score_turn_2"], higher_is_better={"judge_score_turn_1": True, "judge_score_turn_2": True}, - category=MetricCategory.LLM_AS_JUDGE_MULTI_TURN, - use_case=MetricUseCase.SUMMARIZATION, + category=SamplingMethod.GENERATIVE, sample_level_fn=JudgeLLMMTBench( judge_model_name="flowaicom/Flow-Judge-v0.1", template=flow_judge_mt_bench_prompt, @@ -88,7 +87,7 @@ def flow_judge_mt_bench_prompt(question, answer, options, 
gold): evaluation_splits=["train"], few_shots_split="", few_shots_select="random", - metric=[llm_judge_mt_bench], + metrics=[llm_judge_mt_bench], generation_size=1024, stop_sequence=[], ) diff --git a/src/lighteval/tasks/extended/olympiade_bench/main.py b/src/lighteval/tasks/extended/olympiade_bench/main.py index fd5a08d75..090562a1b 100644 --- a/src/lighteval/tasks/extended/olympiade_bench/main.py +++ b/src/lighteval/tasks/extended/olympiade_bench/main.py @@ -218,7 +218,7 @@ def olympiad_bench_prompt(line, task_name: str = None): suite=["extended"], hf_repo="Hothan/OlympiadBench", hf_subset=subset, - metric=[metric], + metrics=[metric], hf_avail_splits=["train"], evaluation_splits=["train"], few_shots_split="train", diff --git a/src/lighteval/tasks/extended/tiny_benchmarks/main.py b/src/lighteval/tasks/extended/tiny_benchmarks/main.py index 3e4cfed6f..d195bc89b 100644 --- a/src/lighteval/tasks/extended/tiny_benchmarks/main.py +++ b/src/lighteval/tasks/extended/tiny_benchmarks/main.py @@ -40,8 +40,8 @@ from lighteval.metrics.metrics import CorpusLevelMetricGrouping, Metrics from lighteval.metrics.metrics_sample import ExactMatches, LoglikelihoodAcc from lighteval.metrics.normalizations import gsm8k_normalizer -from lighteval.metrics.utils.metric_utils import MetricCategory, MetricUseCase from lighteval.tasks.lighteval_task import LightevalTaskConfig +from lighteval.tasks.requests import SamplingMethod # Utility functions @@ -256,7 +256,7 @@ def aggregate(self, y_input): evaluation_splits=task["evaluation_split"], few_shots_split=None, few_shots_select="random_sampling", - metric=[f"tinybench_metric_{name}"], + metrics=[f"tinybench_metric_{name}"], generation_size=generation_size, stop_sequence=stop_sequence, ) @@ -266,11 +266,9 @@ def aggregate(self, y_input): for task_param in task_params: name = task_param["name"] if name == "gsm8k": - category = MetricCategory.GENERATIVE - use_case = MetricUseCase.MATH + category = SamplingMethod.GENERATIVE else: - category = MetricCategory.MULTICHOICE - use_case = MetricUseCase.ACCURACY + category = SamplingMethod.LOGPROBS extend_enum( Metrics, @@ -280,7 +278,6 @@ def aggregate(self, y_input): higher_is_better=dict.fromkeys(TinyCorpusAggregator.METRICS, True), sample_level_fn=TinyCorpusAggregator(name).compute, category=category, - use_case=use_case, corpus_level_fn=TinyCorpusAggregator(name).aggregate, ), ) diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index c9a31904b..f5e63c351 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -20,46 +20,25 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
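The tiny_benchmarks hunk shows the registration pattern that survives the MetricCategory removal: a task config may refer to a metric by name (metrics=[f"tinybench_metric_{name}"]) as long as that metric is later registered on the Metrics enum with extend_enum, now carrying a single SamplingMethod instead of a category/use-case pair. A hedged sketch of that registration for a stand-alone metric (all names below are placeholders):

import numpy as np
from aenum import extend_enum

from lighteval.metrics.metrics import Metrics, SampleLevelMetric
from lighteval.tasks.requests import SamplingMethod

extend_enum(
    Metrics,
    "my_registered_metric",  # placeholder name, referenced as a string from a task config
    SampleLevelMetric(
        metric_name="my_registered_metric",
        higher_is_better=True,
        sample_level_fn=lambda doc, model_response, **kwargs: 0.0,  # placeholder scorer
        category=SamplingMethod.LOGPROBS,  # or SamplingMethod.GENERATIVE for free-form tasks
        corpus_level_fn=np.mean,
    ),
)
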
-import collections import inspect import logging import random from dataclasses import asdict, dataclass, field -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple +from typing import Callable from datasets import DatasetDict from huggingface_hub import TextGenerationInputGrammarType from multiprocess import Pool from pytablewriter import MarkdownTableWriter -from lighteval.metrics import ( - apply_generative_metric, - apply_llm_as_judge_metric, - apply_multichoice_metric, - apply_multichoice_metric_one_token, - apply_perplexity_metric, - apply_target_perplexity_metric, -) -from lighteval.metrics.metrics import Metric, MetricCategory, Metrics -from lighteval.models.transformers.transformers_model import TransformersModel -from lighteval.tasks.prompt_manager import PromptManager +from lighteval.metrics.metrics import Metric, Metrics +from lighteval.tasks.prompt_manager import FewShotSampler from lighteval.tasks.requests import ( Doc, - GreedyUntilMultiTurnRequest, - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, - Request, - RequestType, - SampleUid, ) from lighteval.utils.utils import ListLike, as_list, download_dataset_worker -if TYPE_CHECKING: - from lighteval.logging.evaluation_tracker import EvaluationTracker - logger = logging.getLogger(__name__) @@ -89,28 +68,31 @@ class LightevalTaskConfig: """ name: str - prompt_function: Callable[[dict, str], Doc | None] + prompt_function: Callable[ + [dict, str], Doc + ] # The prompt function should be used to map a line in the dataset to a Sample hf_repo: str hf_subset: str - metric: ListLike[Metric | Metrics] + metrics: ListLike[Metric] # List of metric , should be configurable # Additional hf dataset config - hf_revision: Optional[str] = None - hf_filter: Optional[Callable[[dict], bool]] = None - hf_avail_splits: Optional[ListLike[str]] = field(default_factory=lambda: ["train", "validation", "test"]) + hf_revision: str | None = None + hf_filter: Callable[[dict], bool] | None = None + hf_avail_splits: ListLike[str] = field(default_factory=lambda: ["train", "validation", "test"]) + # We default to false, to reduce security issues trust_dataset: bool = False # Splits evaluation_splits: ListLike[str] = field(default_factory=lambda: ["validation"]) - few_shots_split: Optional[str] = None - few_shots_select: Optional[str] = None + few_shots_split: str | None = None + few_shots_select: str | None = None # Generation args - generation_size: Optional[int] = None - generation_grammar: Optional[TextGenerationInputGrammarType] = None - stop_sequence: Optional[ListLike[str]] = None - num_samples: Optional[list[int]] = None + generation_size: int | None = None + generation_grammar: TextGenerationInputGrammarType | None = None + stop_sequence: ListLike[str] | None = None + num_samples: list[int] | None = None suite: ListLike[str] = field(default_factory=lambda: ["custom"]) @@ -119,18 +101,22 @@ class LightevalTaskConfig: must_remove_duplicate_docs: bool = False + num_fewshots: int = 0 + truncate_fewshots: bool = False + version: int = 0 def __post_init__(self): # If we got a Metrics enums instead of a Metric, we convert - self.metric = [metric.value if isinstance(metric, Metrics) else metric for metric in self.metric] + self.metrics = [metric.value if isinstance(metric, Metrics) else metric for metric in self.metrics] # Convert list to tuple for hashing - self.metric = tuple(self.metric) - self.hf_avail_splits = tuple(self.hf_avail_splits) if self.hf_avail_splits is not None 
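Besides the metric-to-metrics rename, __post_init__ now normalises the metric list to a tuple and derives full_name from the new num_fewshots field. A small sketch of the resulting behaviour (repo and task names are placeholders):

from lighteval.metrics.metrics import Metrics
from lighteval.tasks.lighteval_task import LightevalTaskConfig

cfg = LightevalTaskConfig(
    name="demo_task",
    prompt_function=lambda line, task_name: None,  # placeholder; real tasks return a Doc
    hf_repo="org/dataset",
    hf_subset="default",
    metrics=[Metrics.exact_match],
    evaluation_splits=["test"],
    num_fewshots=3,
)

assert isinstance(cfg.metrics, tuple)   # Metrics enum members are unwrapped and stored as a tuple
assert cfg.full_name == "demo_task|3"   # derived in __post_init__
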
else None + self.metrics = tuple(self.metrics) + self.hf_avail_splits = tuple(self.hf_avail_splits) self.evaluation_splits = tuple(self.evaluation_splits) self.suite = tuple(self.suite) - self.stop_sequence = tuple(self.stop_sequence) if self.stop_sequence is not None else () + self.stop_sequence = self.stop_sequence if self.stop_sequence is not None else () + self.full_name = f"{self.name}|{self.num_fewshots}" def print(self): md_writer = MarkdownTableWriter() @@ -159,54 +145,53 @@ def print(self): class LightevalTask: - def __init__( # noqa: C901 - self, name: str, cfg: LightevalTaskConfig, cache_dir: Optional[str] = None + def __init__( + self, + config: LightevalTaskConfig, ): """ Initialize a LightEval task. Args: - name (str): name of the task. - cfg (dict): configuration dictionary containing + config (dict): configuration dictionary containing task-specific settings (from the task_table.json file). - cache_dir (Optional[str], optional): directory to cache the - dataset. Defaults to None. """ - self.name = name - self.version = cfg.version - self.cache_dir = cache_dir - self._cfg = cfg + self.config = config + self.name = config.name + self.version = config.version + self.suite = config.suite + self.dataset_config = config + + self.full_name = config.full_name # Dataset info - self.dataset_path = cfg.hf_repo - self.dataset_config_name = cfg.hf_subset - self.dataset_revision = cfg.hf_revision - self.dataset_filter = cfg.hf_filter - self.trust_dataset = cfg.trust_dataset - self.dataset: Optional[DatasetDict] = None # Delayed download - logger.info(f"{self.dataset_path} {self.dataset_config_name}") - self._fewshot_docs = None + self.dataset_path = config.hf_repo + self.dataset_config_name = config.hf_subset + self.dataset_revision = config.hf_revision + self.dataset_filter = config.hf_filter + self.trust_dataset = config.trust_dataset + self.dataset: DatasetDict | None = None # Delayed download + self.evaluation_split = as_list(config.evaluation_splits) self._docs = None - self.evaluation_split = as_list(cfg.evaluation_splits) + self._fewshot_docs = None + self.fewshot_split: str | None = config.few_shots_split or self.get_first_possible_fewshot_splits( + config.hf_avail_splits or [] + ) + self.fewshot_selection = config.few_shots_select + self.must_remove_duplicate_docs = config.must_remove_duplicate_docs - self.fewshot_split: list[str] | None - if cfg.few_shots_split is not None: - self.fewshot_split = as_list(cfg.few_shots_split) - else: - self.fewshot_split = self.get_first_possible_fewshot_splits(cfg.hf_avail_splits or []) - self.fewshot_selection = cfg.few_shots_select + self.formatter = config.prompt_function + self.fewshot_sampler = FewShotSampler(self) # Metrics - self.metrics = as_list(cfg.metric) - self.suite = as_list(cfg.suite) - ignored = [metric for metric in self.metrics if metric.category == MetricCategory.IGNORED] + self.metrics = config.metrics + self.sampling_methods = list({metric.category for metric in self.metrics}) - if len(ignored) > 0: - logger.warning(f"Not implemented yet: ignoring the metric {' ,'.join(ignored)} for task {self.name}.") - - current_categories = [metric.category for metric in self.metrics] - self.has_metric_category = {category: (category in current_categories) for category in MetricCategory} + # generation parameters + self.generation_size = config.generation_size + self.generation_grammar = config.generation_grammar + self.stop_sequence = config.stop_sequence # We assume num_samples always contains 1 (for base generative evals) 
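With the name and cache_dir parameters gone, a task is now built from the config object alone, and the old per-category bookkeeping collapses into the task's sampling_methods. A sketch, reusing the cfg from the previous snippet:

from lighteval.tasks.lighteval_task import LightevalTask

task = LightevalTask(cfg)      # cfg: the LightevalTaskConfig sketched above
print(task.full_name)          # "demo_task|3"
print(task.sampling_methods)   # de-duplicated SamplingMethod values of cfg.metrics
print(task.fewshot_split)      # first available split not used for evaluation, e.g. "train"
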
self.num_samples = [1] @@ -217,31 +202,14 @@ def __init__( # noqa: C901 # Update the number of samples to generate using the information in the metric name self.num_samples.append(extract_num_samples(metric_name)) - self.formatter = cfg.prompt_function - - self.generation_size = cfg.generation_size - self.generation_grammar = cfg.generation_grammar - self.stop_sequence = cfg.stop_sequence - self.must_remove_duplicate_docs = cfg.must_remove_duplicate_docs - - @property - def cfg(self): - return self._cfg - - def get_first_possible_fewshot_splits( - self, available_splits: ListLike[str], number_of_splits: int = 1 - ) -> list[str] | None: + def get_first_possible_fewshot_splits(self, available_splits: ListLike[str]) -> str | None: """ Parses the possible fewshot split keys in order: train, then validation keys and matches them with the available keys. Returns the first available. - Args: - number_of_splits (int, optional): Number of splits to return. - Defaults to 1. - Returns: - list[str]: List of the first available fewshot splits. + str: the first available fewshot splits or None if nothing is available """ # Possible few shot splits are the available splits not used for evaluation possible_fewshot_splits = [k for k in available_splits if k not in self.evaluation_split] @@ -255,7 +223,7 @@ def get_first_possible_fewshot_splits( stored_splits.extend(available_splits) if len(stored_splits) > 0: - return stored_splits[:number_of_splits] + return stored_splits[0] logger.warning(f"Careful, the task {self.name} is using evaluation data to build the few shot examples.") return None @@ -280,7 +248,8 @@ def _get_docs_from_split(self, splits: list[str], few_shots=False) -> list[Doc]: self.dataset_filter, self.dataset_revision, ) - splits = as_list(splits) + + assert self.dataset is not None, f"Dataset {self.dataset_path} not found." docs = [] for split in splits: @@ -291,10 +260,10 @@ def _get_docs_from_split(self, splits: list[str], few_shots=False) -> list[Doc]: item["__few_shots"] = few_shots # Some tasks require to know which is the current item index in order to apply a different prompt template item["__index"] = ix - cur_docs = self.formatter(item, self.name) - if cur_docs is None: - continue - docs.extend(as_list(cur_docs)) + doc = self.formatter(item, self.name) + doc.id = str(ix) + docs.append(doc) + return docs def remove_duplicate_docs(self, docs: list[Doc]) -> list[Doc]: @@ -321,7 +290,8 @@ def fewshot_docs(self) -> list[Doc]: if self.fewshot_split is None: self._fewshot_docs = self._get_docs_from_split(self.evaluation_split, few_shots=True) else: # Normal case - self._fewshot_docs = self._get_docs_from_split(self.fewshot_split, few_shots=True) + self._fewshot_docs = self._get_docs_from_split([self.fewshot_split], few_shots=True) + return self._fewshot_docs def eval_docs(self) -> list[Doc]: @@ -337,199 +307,33 @@ def eval_docs(self) -> list[Doc]: self._docs = self.remove_duplicate_docs(self._docs) return self._docs - def construct_requests( - self, formatted_doc: Doc, context: str, document_id_seed: str, current_task_name: str - ) -> Dict[RequestType, List[Request]]: - """ - Constructs a list of requests from the task based on the given parameters. + def get_docs(self, max_samples: int | None = None) -> list[Doc]: + eval_docs = self.eval_docs() - Args: - formatted_doc (Doc): Formatted document almost straight from the dataset. - ctx (str): Context, which is the few shot examples + the query. 
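num_samples is still driven by the metric name: names such as "codegen_pass@1:16" (see the LiveCodeBench hunk earlier) encode the number of generations after the colon, and extract_num_samples, kept further down in this file, pulls that number out. A purely illustrative parser for that convention, not the actual helper:

import re


def extract_num_samples_sketch(metric_name: str) -> int:
    # Illustrative only: read the ":<n>" suffix of names such as "pass@1:16" -> 16.
    match = re.search(r"@\d+:(\d+)", metric_name)
    return int(match.group(1)) if match else 1


assert extract_num_samples_sketch("codegen_pass@1:16") == 16
assert extract_num_samples_sketch("exact_match") == 1
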
- document_id_seed (str): Index of the document in the task appended with the seed used for the few shot sampling. - current_task_name (str): Name of the current task. + if len(eval_docs) == 0: + raise ValueError(f"Task {self.name} has no documents to evaluate skipping.") - Returns: - dict[RequestType, List[Request]]: List of requests. - """ - requests: dict[RequestType, list[Request]] = collections.defaultdict(list) - - if self.has_metric_category[MetricCategory.TARGET_PERPLEXITY]: - golds = formatted_doc.get_golds() - requests[RequestType.LOGLIKELIHOOD] += [ - LoglikelihoodRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=i, - context=context, - choice=gold, - metric_categories=[MetricCategory.TARGET_PERPLEXITY], - images=formatted_doc.images, - ) - for i, gold in enumerate(golds) - ] - if self.has_metric_category[MetricCategory.PERPLEXITY]: - requests[RequestType.LOGLIKELIHOOD_ROLLING] += [ - LoglikelihoodRollingRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - metric_categories=[MetricCategory.PERPLEXITY], - images=formatted_doc.images, - ) - ] - if self.has_metric_category[MetricCategory.GENERATIVE_SAMPLING]: - # All the possible sampling tasks require the same generation process - we can do them in one step - # so we select the maximum number of samples and the metrics will select only the - # relevant number of items - requests[RequestType.GREEDY_UNTIL] += [ - GreedyUntilRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - stop_sequence=self.stop_sequence, - generation_size=self.generation_size, - generation_grammar=self.generation_grammar, - num_samples=max(self.num_samples), - do_sample=True, - use_logits=False, - metric_categories=[MetricCategory.GENERATIVE_SAMPLING], - images=formatted_doc.images, - ) - ] - if ( - self.has_metric_category[MetricCategory.GENERATIVE] - or self.has_metric_category[MetricCategory.GENERATIVE_LOGPROB] - ): - use_logits = self.has_metric_category[MetricCategory.GENERATIVE_LOGPROB] - requests[RequestType.GREEDY_UNTIL] += [ - GreedyUntilRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - stop_sequence=self.stop_sequence, - generation_size=self.generation_size, - generation_grammar=self.generation_grammar, - num_samples=1, - use_logits=use_logits, - metric_categories=[ - c - for c in [ - MetricCategory.GENERATIVE, - MetricCategory.GENERATIVE_LOGPROB, - ] - if self.has_metric_category[c] - ], - images=formatted_doc.images, - ) - ] - if ( - self.has_metric_category[MetricCategory.MULTICHOICE] - or self.has_metric_category[MetricCategory.MULTICHOICE_PMI] - ): - requests[RequestType.LOGLIKELIHOOD] += [ - LoglikelihoodRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=i, - context=context, - choice=choice, - metric_categories=[ - c - for c in [MetricCategory.MULTICHOICE, MetricCategory.MULTICHOICE_PMI] - if self.has_metric_category[c] - ], - images=formatted_doc.images, - ) - for i, choice in enumerate(formatted_doc.choices) - ] - if self.has_metric_category[MetricCategory.MULTICHOICE_PMI]: - assert formatted_doc.unconditioned_query is not None, ( - "Unconditioned query is required for PMI normalization" - ) - requests[RequestType.LOGLIKELIHOOD] += [ - LoglikelihoodRequest( - task_name=current_task_name, - sample_index=document_id_seed, - # The normalization should come after the choices - 
request_index=i + len(formatted_doc.choices), - context=formatted_doc.unconditioned_query, - choice=choice, - metric_categories=[MetricCategory.MULTICHOICE_PMI], - images=formatted_doc.images, - ) - for i, choice in enumerate(formatted_doc.choices) - ] - if self.has_metric_category[MetricCategory.MULTICHOICE_ONE_TOKEN]: - requests[RequestType.LOGLIKELIHOOD_SINGLE_TOKEN] += [ - LoglikelihoodSingleTokenRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - choices=formatted_doc.choices, - metric_categories=[MetricCategory.MULTICHOICE_ONE_TOKEN], - images=formatted_doc.images, - ) - ] - if self.has_metric_category[MetricCategory.LLM_AS_JUDGE_MULTI_TURN]: - requests[RequestType.GREEDY_UNTIL_MULTI_TURN] += [ - GreedyUntilMultiTurnRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - stop_sequence=self.stop_sequence, - generation_size=self.generation_size, - metric_categories=[MetricCategory.LLM_AS_JUDGE_MULTI_TURN], - images=formatted_doc.images, - ) - ] - if self.has_metric_category[MetricCategory.LLM_AS_JUDGE]: - requests[RequestType.GREEDY_UNTIL] += [ - GreedyUntilRequest( - task_name=current_task_name, - sample_index=document_id_seed, - request_index=0, - context=context, - stop_sequence=self.stop_sequence, - generation_size=self.generation_size, - generation_grammar=self.generation_grammar, - num_samples=1, - metric_categories=[MetricCategory.LLM_AS_JUDGE], - images=formatted_doc.images, - ) - ] - - return requests + n_samples = min(max_samples, len(eval_docs)) if max_samples else len(eval_docs) + rnd = random.Random() + rnd.seed(42) + rnd.shuffle(eval_docs) - def get_metric_method_from_category(self, metric_category): - if not self.has_metric_category[metric_category]: - raise ValueError(f"Requested a metric category {metric_category} absent from the task list.") + docs = [] - return LightevalTask._get_metric_method_from_category(metric_category) + for doc in eval_docs[:n_samples]: + num_fewshots = self.dataset_config.num_fewshots + doc.task_name = self.full_name + doc.fewshot_samples = self.fewshot_sampler.sample_fewshot_examples( + num_fewshots, 0, formatted_doc=doc, sampler=rnd + ) + doc.sampling_methods.extend(self.sampling_methods) + doc.generation_size = self.generation_size + doc.use_logits = True + doc.stop_sequences = self.stop_sequence + doc.num_samples = max(self.num_samples) + docs.append(doc) - @staticmethod - def _get_metric_method_from_category(metric_category): - if metric_category == MetricCategory.TARGET_PERPLEXITY: - return apply_target_perplexity_metric - if metric_category in [MetricCategory.MULTICHOICE, MetricCategory.MULTICHOICE_PMI]: - return apply_multichoice_metric - if metric_category == MetricCategory.MULTICHOICE_ONE_TOKEN: - return apply_multichoice_metric_one_token - if metric_category == MetricCategory.PERPLEXITY: - return apply_perplexity_metric - if metric_category in [ - MetricCategory.GENERATIVE, - MetricCategory.GENERATIVE_SAMPLING, - MetricCategory.GENERATIVE_LOGPROB, - ]: - return apply_generative_metric - if metric_category in [MetricCategory.LLM_AS_JUDGE_MULTI_TURN, MetricCategory.LLM_AS_JUDGE]: - return apply_llm_as_judge_metric + return docs def aggregation(self): """ @@ -539,7 +343,7 @@ def aggregation(self): return Metrics.corpus_level_fns(self.metrics) @staticmethod - def load_datasets(tasks: list["LightevalTask"], dataset_loading_processes: int = 1) -> None: + def load_datasets(tasks: dict[str, "LightevalTask"], 
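get_docs replaces the old request-construction path: documents are shuffled with a fixed seed, capped at max_samples, given their few-shot samples, and annotated with the generation parameters the model needs. A sketch of how a caller might use it (task is assumed to be a fully configured LightevalTask):

docs = task.get_docs(max_samples=8)   # task: a configured LightevalTask

for doc in docs:
    # Each Doc now carries everything needed to build the model call.
    print(doc.task_name)              # "<task name>|<num_fewshots>"
    print(len(doc.fewshot_samples), doc.num_samples, doc.generation_size, doc.stop_sequences)
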
dataset_loading_processes: int = 1) -> None: """ Load datasets from the HuggingFace Hub for the given tasks. @@ -560,7 +364,7 @@ def load_datasets(tasks: list["LightevalTask"], dataset_loading_processes: int = task.dataset_filter, task.dataset_revision, ) - for task in tasks + for task in tasks.values() ] else: with Pool(processes=dataset_loading_processes) as pool: @@ -574,101 +378,12 @@ def load_datasets(tasks: list["LightevalTask"], dataset_loading_processes: int = task.dataset_filter, task.dataset_revision, ) - for task in tasks + for task in tasks.values() ], ) for task, dataset in zip(tasks, datasets): - task.dataset = dataset - - -def create_requests_from_tasks( # noqa: C901 - task_dict: dict[str, LightevalTask], - fewshot_dict: dict[str, list[Tuple[int, bool]]], - num_fewshot_seeds: int, - lm: TransformersModel, - max_samples: int | None, - evaluation_tracker: "EvaluationTracker", - use_chat_template: bool, - system_prompt: str | None, - cot_prompt: str | None, -) -> Tuple[dict[RequestType, list[Request]], dict[SampleUid, Doc]]: - """ - Takes a task dict and a fewshot dict and returns a dict of requests, a dict - of docs, and a dict of requests origins. The construction of prompts and - thus the managing of few shots is done here. - - Args: - task_dict (dict[str, LightevalTask]): A dictionary of tasks. - fewshot_dict (dict[str, list[Tuple[int, bool]]]): A dictionary of few - shot examples. - num_fewshot_seeds (int): number of few shot seeds. - lm (TransformersModel): language model class that will be used to eventually - truncate the few shot examples (we need the maximum input size of the - model) - max_samples (int): maximum number of samples. - evaluation_tracker (EvaluationTracker): evaluation tracker. - use_chat_template (bool): Whether to use the chat template. - system_prompt (str): System prompt - cot_prompt (str): Chain of thought prompt - - Raises: - NotImplementedError: If the request type is not implemented for the - task. - - Returns: - Tuple[dict[RequestType, list[Request]], dict[SampleUid, Doc]]: A - tuple containing the requests and the documents. 
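load_datasets now expects the task dictionary itself, keyed by task name, rather than a list, and writes each downloaded dataset back onto the corresponding task. A minimal sketch of the new call shape:

from lighteval.tasks.lighteval_task import LightevalTask

# tasks: dict[str, LightevalTask], mapping task names to configured task objects
LightevalTask.load_datasets(tasks, dataset_loading_processes=4)
assert all(task.dataset is not None for task in tasks.values())
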
- """ - docs: dict[SampleUid, Doc] = {} - requests: dict[RequestType, list[Request]] = collections.defaultdict(list) - - # Filter out tasks that don't have any docs - task_dict_items = [(name, task) for name, task in task_dict.items() if len(task.eval_docs()) > 0] - - # Get lists of each type of request - for task_name, task in task_dict_items: - task_docs = list(task.eval_docs()) - n_samples = min(max_samples, len(task_docs)) if max_samples else len(task_docs) - evaluation_tracker.task_config_logger.log_num_docs(task_name, len(task_docs), n_samples) - - # logs out the different versions of the tasks for every few shot - for num_fewshot, _ in fewshot_dict[task_name]: - cur_task_name = f"{task_name}|{num_fewshot}" - evaluation_tracker.versions_logger.log(cur_task_name, task.version) - - rnd = random.Random() - rnd.seed(42) - rnd.shuffle(task_docs) - - prompt_manager = PromptManager(lm=lm, task=task) - seeds = prompt_manager.few_shot_sampler.get_fewshot_seeds(num_fewshot_seeds) - - # We can do several round of fewshots sampling to get some variance information - for seed in seeds: - for doc_id in range(n_samples): - doc_id_seed = f"{doc_id}_{seed}" # if we do several rounds of few shot sampling we have several seeds - for num_fewshot, truncate_few_shots in fewshot_dict[task_name]: - doc = task_docs[doc_id] - doc = prompt_manager.add_context_to_doc( - doc, - num_fewshot=num_fewshot, - seed=seed, - sampler=rnd, - truncate_few_shots=truncate_few_shots, - use_chat_template=use_chat_template, - system_prompt=system_prompt, - cot_prompt=cot_prompt, - ) - - # Constructing the requests - cur_task_name = f"{task_name}|{num_fewshot}" - docs[SampleUid(cur_task_name, doc_id_seed)] = doc - req_type_reqs_dict = task.construct_requests(doc, doc.ctx, doc_id_seed, cur_task_name) - for req_type, reqs in req_type_reqs_dict.items(): - requests[req_type].extend(reqs) - - return requests, docs + tasks[task].dataset = dataset def extract_num_samples(metric_name: str) -> int: diff --git a/src/lighteval/tasks/multilingual/tasks.py b/src/lighteval/tasks/multilingual/tasks.py index f452588c3..d9d0fad0e 100644 --- a/src/lighteval/tasks/multilingual/tasks.py +++ b/src/lighteval/tasks/multilingual/tasks.py @@ -80,7 +80,7 @@ LightevalTaskConfig( name=f"xnli_{language.value}_{formulation.name.lower()}", suite=["lighteval"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -135,7 +135,7 @@ LightevalTaskConfig( name=f"xnli2.0_{language.value}_{formulation.name.lower()}", suite=["lighteval"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -215,7 +215,7 @@ hf_filter=lambda x: int(x["label"]) in [0, 2], evaluation_splits=["validation"], few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -262,7 +262,7 @@ hf_filter=lambda x: int(x["label"]) in [0, 2], evaluation_splits=("test",), few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -318,7 +318,7 @@ hf_subset=standardize_tag(language.value), evaluation_splits=("test",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -363,7 +363,7 @@ hf_filter=lambda x: 
int(x["outputs"] or "0") in [1, 2], evaluation_splits=("train",), few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -399,7 +399,7 @@ hf_filter=lambda x: int(x["label"]) in [1, 2], evaluation_splits=("validation",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -434,7 +434,7 @@ # Only keep the positive and negative examples evaluation_splits=("validation",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=None), @@ -483,7 +483,7 @@ hf_subset=("copa_ext_ar" if language == Language.ARABIC else standardize_tag(language.value)), evaluation_splits=["test"], few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -533,7 +533,7 @@ hf_revision="d356ef19a4eb287e88a51d07a56b73ba88c7f188", evaluation_splits=["test"], hf_avail_splits=["test"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -585,7 +585,7 @@ hf_subset="parus", evaluation_splits=["train"], few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -629,7 +629,7 @@ hf_revision="96ed8e0dfc6172dad1d3df338d7b8ba6c1ff9d83", evaluation_splits=["validation"], hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -703,7 +703,7 @@ hf_subset="default", evaluation_splits=["validation"], hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -737,7 +737,7 @@ hf_subset="default", evaluation_splits=["validation"], few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -766,7 +766,7 @@ hf_subset="hi", evaluation_splits=("validation",), few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbCharNorm()), @@ -794,7 +794,7 @@ hf_subset="default", evaluation_splits=("valid",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -841,7 +841,7 @@ few_shots_split="validation", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(language, "prefix"), multilingual_quasi_f1_score_metric(language), ), @@ -885,7 +885,7 @@ few_shots_split="train", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.GERMAN, "prefix"), multilingual_quasi_f1_score_metric(Language.GERMAN), ), @@ -914,7 +914,7 @@ few_shots_split="train", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.ITALIAN, 
"prefix"), multilingual_quasi_f1_score_metric(Language.ITALIAN), ), @@ -941,7 +941,7 @@ few_shots_split="validation", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.THAI, "prefix"), multilingual_quasi_f1_score_metric(Language.THAI), ), @@ -966,7 +966,7 @@ hf_subset="sberquad", evaluation_splits=("validation",), few_shots_split="train", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.RUSSIAN, "prefix"), multilingual_quasi_f1_score_metric(Language.RUSSIAN), ), @@ -996,7 +996,7 @@ hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), evaluation_splits=("validation",), few_shots_split="train", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.PORTUGUESE, "prefix"), multilingual_quasi_f1_score_metric(Language.PORTUGUESE), ), @@ -1025,7 +1025,7 @@ hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), evaluation_splits=("validation",), few_shots_split="train", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.SPANISH, "prefix"), multilingual_quasi_f1_score_metric(Language.SPANISH), ), @@ -1053,7 +1053,7 @@ hf_subset="plain_text", evaluation_splits=("validation",), few_shots_split="train", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.ARABIC, "prefix"), multilingual_quasi_f1_score_metric(Language.ARABIC), ), @@ -1080,7 +1080,7 @@ hf_subset="default", evaluation_splits=("test",), few_shots_split="validation", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.SWAHILI, "prefix"), multilingual_quasi_f1_score_metric(Language.SWAHILI), ), @@ -1107,7 +1107,7 @@ hf_subset="default", evaluation_splits=("validation",), few_shots_split="train", - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.CHINESE, "prefix"), multilingual_quasi_f1_score_metric(Language.CHINESE), ), @@ -1135,7 +1135,7 @@ evaluation_splits=("trial",), few_shots_split="train", generation_size=400, - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.CHINESE, "prefix"), multilingual_quasi_f1_score_metric(Language.CHINESE), ), @@ -1167,7 +1167,7 @@ evaluation_splits=("test",), hf_avail_splits=("test",), generation_size=400, - metric=( + metrics=( multilingual_quasi_exact_match_metric(language, "prefix"), multilingual_quasi_f1_score_metric(language), ), @@ -1208,7 +1208,7 @@ few_shots_split="valid_hasAns", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.FRENCH, "prefix"), multilingual_quasi_f1_score_metric(Language.FRENCH), ), @@ -1234,7 +1234,7 @@ few_shots_split="train", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(Language.TURKISH, "prefix"), multilingual_quasi_f1_score_metric(Language.TURKISH), ), @@ -1263,7 +1263,7 @@ few_shots_split="train", generation_size=400, stop_sequence=("\n",), - metric=( + metrics=( multilingual_quasi_exact_match_metric(language, "prefix"), multilingual_quasi_f1_score_metric(language), ), @@ -1304,7 +1304,7 @@ hf_subset="c3", evaluation_splits=("validation",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1338,7 +1338,7 @@ evaluation_splits=["test"], few_shots_split="validation", trust_dataset=True, - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ 
loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1363,7 +1363,7 @@ few_shots_split="validation", suite=["lighteval"], hf_repo="OALL/AlGhafa-Arabic-LLM-Benchmark-Native", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1402,7 +1402,7 @@ hf_avail_splits=["test"], generation_size=400, stop_sequence=("\n",), - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(lang, "prefix"), multilingual_quasi_f1_score_metric(lang), ], @@ -1437,7 +1437,7 @@ hf_subset=language, evaluation_splits=("test",), hf_avail_splits=["test"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1691,7 +1691,7 @@ ), evaluation_splits=("latest",), hf_avail_splits=["latest"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1739,7 +1739,7 @@ trust_dataset=True, evaluation_splits=("test",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1803,7 +1803,7 @@ hf_avail_splits=["test"], hf_filter=partial(lambda subset, x: x["Subject"].lower() == subset, subset), hf_revision="038c7808122969ead7456361af05cb8f47d247f8", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1868,7 +1868,7 @@ subset, sensitivity_label, ), - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -1954,7 +1954,7 @@ hf_filter=partial(lambda subset, line: line["subject"] == subset, subset), evaluation_splits=("test",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2010,7 +2010,7 @@ hf_filter=lambda x: x["meta"]["domain"] == subset, evaluation_splits=("public_test",), hf_avail_splits=["public_test"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2042,7 +2042,7 @@ hf_subset=subset, evaluation_splits=("test",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2149,7 +2149,7 @@ hf_subset=subset, evaluation_splits=("test",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2230,7 +2230,7 @@ hf_subset=subset, evaluation_splits=("test",), hf_avail_splits=["dev"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2277,7 +2277,7 @@ hf_subset=subset, evaluation_splits=("test",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2342,7 +2342,7 @@ trust_dataset=True, evaluation_splits=("test",), few_shots_split="train", - metric=get_metrics_for_formulation( + 
metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2401,7 +2401,7 @@ trust_dataset=True, evaluation_splits=["test"], few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2436,7 +2436,7 @@ hf_subset=standardize_tag(language.value), evaluation_splits=["test"], few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2486,7 +2486,7 @@ hf_subset=f"ARC-{subset.capitalize()}", evaluation_splits=("test",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2522,7 +2522,7 @@ hf_subset=f"ARC-{subset.capitalize()}", evaluation_splits=("test",), few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2550,7 +2550,7 @@ evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2588,7 +2588,7 @@ else "dc1df9df632d14c251594d9129fb833d2ca4429c", evaluation_splits=("test",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2648,7 +2648,7 @@ trust_dataset=True, evaluation_splits=("validation",), hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2721,7 +2721,7 @@ hf_subset="default", evaluation_splits=("validation",), hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2884,7 +2884,7 @@ ), evaluation_splits=("test",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2918,7 +2918,7 @@ evaluation_splits=("test",), few_shots_split="dev", generation_size=-1, - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -2960,7 +2960,7 @@ hf_subset=subset, evaluation_splits=("test",), few_shots_split="train", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3010,7 +3010,7 @@ ), evaluation_splits=("validation",), hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3069,7 +3069,7 @@ evaluation_splits=["test"], few_shots_split="validation", trust_dataset=True, - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3108,7 +3108,7 @@ trust_dataset=True, evaluation_splits=["test"], 
few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3142,7 +3142,7 @@ hf_subset="default", evaluation_splits=("test",), few_shots_split="validation", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3177,7 +3177,7 @@ hf_subset="ruopenbookqa", evaluation_splits=("train",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3223,7 +3223,7 @@ evaluation_splits=["test"], few_shots_split="validation", few_shots_select="sequential", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3268,7 +3268,7 @@ hf_subset="mathlogicqa", evaluation_splits=("train",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3299,7 +3299,7 @@ evaluation_splits=("test",), few_shots_split="validation", generation_size=25, - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(Language.CHINESE, "full"), ], stop_sequence=("\n",), @@ -3324,7 +3324,7 @@ evaluation_splits=("test",), few_shots_split="train", generation_size=25, - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(language, "full"), ], stop_sequence=("\n",), @@ -3363,7 +3363,7 @@ evaluation_splits=("test",), few_shots_split="train", generation_size=25, - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(language, "full"), ], stop_sequence=("\n",), @@ -3433,7 +3433,7 @@ evaluation_splits=("test",), hf_avail_splits=["test"], few_shots_split=None, - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3524,7 +3524,7 @@ hf_subset=subset, evaluation_splits=("val",), few_shots_split="dev", - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3561,7 +3561,7 @@ hf_subset="default", evaluation_splits=("train",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3596,7 +3596,7 @@ hf_subset=year, evaluation_splits=("train",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3634,7 +3634,7 @@ hf_subset="ruworldtree", evaluation_splits=("train",), hf_avail_splits=["train"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3670,7 +3670,7 @@ hf_subset=f"X-CODAH-{standardize_tag(language.value) if language != Language.JAPANESE else 'jap'}", evaluation_splits=("validation",), hf_avail_splits=["validation"], - metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3730,7 +3730,7 @@ hf_subset=standardize_tag(lang.value), evaluation_splits=["eval"], few_shots_split="train", - 
metric=get_metrics_for_formulation( + metrics=get_metrics_for_formulation( formulation, [ loglikelihood_acc_metric(normalization=LogProbTokenNorm()), @@ -3777,7 +3777,7 @@ hf_subset=standardize_tag(language.value) if language != Language.JAPANESE else "jp", evaluation_splits=("test",), hf_avail_splits=["test"], - metric=[ + metrics=[ loglikelihood_acc_metric(normalization=None), loglikelihood_acc_metric(normalization=LogProbTokenNorm()), loglikelihood_acc_metric(normalization=LogProbCharNorm()), @@ -3809,7 +3809,7 @@ hf_subset="default", evaluation_splits=("validation",), few_shots_split="train", - metric=[ + metrics=[ loglikelihood_acc_metric(normalization=None), loglikelihood_acc_metric(normalization=LogProbTokenNorm()), loglikelihood_acc_metric(normalization=LogProbCharNorm()), @@ -3862,7 +3862,7 @@ evaluation_splits=("train",), hf_avail_splits=["train"], stop_sequence=("\n",), - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(language, "prefix"), multilingual_quasi_f1_score_metric(language), ] @@ -3919,7 +3919,7 @@ few_shots_split="train", generation_size=400, stop_sequence=("\n",), - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(lang, "prefix"), multilingual_quasi_f1_score_metric(lang), ], @@ -3954,7 +3954,7 @@ hf_avail_splits=["train"], generation_size=400, stop_sequence=("\n",), - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(Language.FRENCH, "prefix"), multilingual_quasi_f1_score_metric(Language.FRENCH), ], @@ -3979,7 +3979,7 @@ hf_avail_splits=["train"], generation_size=400, stop_sequence=("\n",), - metric=[ + metrics=[ multilingual_quasi_exact_match_metric(Language.RUSSIAN, "prefix"), multilingual_quasi_f1_score_metric(Language.RUSSIAN), ], @@ -4075,7 +4075,7 @@ hf_subset=subset, evaluation_splits=("test",), few_shots_split="validation", - metric=[multilingual_quasi_exact_match_metric(Language.ARABIC, "full"), loglikelihood_acc_metric()], + metrics=[multilingual_quasi_exact_match_metric(Language.ARABIC, "full"), loglikelihood_acc_metric()], generation_size=5, stop_sequence=("\n",), ) @@ -4102,7 +4102,7 @@ few_shots_split="valid", generation_size=5, stop_sequence=["\n"], - metric=[multilingual_quasi_exact_match_metric(Language.FRENCH, "full"), loglikelihood_acc_metric()], + metrics=[multilingual_quasi_exact_match_metric(Language.FRENCH, "full"), loglikelihood_acc_metric()], ) ] @@ -4125,7 +4125,7 @@ few_shots_split="train", generation_size=5, stop_sequence=["\n"], - metric=[multilingual_quasi_exact_match_metric(language, "full"), loglikelihood_acc_metric()], + metrics=[multilingual_quasi_exact_match_metric(language, "full"), loglikelihood_acc_metric()], ) for language in [ Language.HINDI, @@ -4378,7 +4378,7 @@ def flores_adapter(lang1, lang2): few_shots_split="dev", few_shots_select=None, generation_size=300, - metric=[Metrics.chrf_plus, Metrics.bleu, Metrics.bleu_1, Metrics.bleu_4], + metrics=[Metrics.chrf_plus, Metrics.bleu, Metrics.bleu_1, Metrics.bleu_4], stop_sequence=["\n"], trust_dataset=True, version=0, diff --git a/src/lighteval/tasks/prompt_manager.py b/src/lighteval/tasks/prompt_manager.py index 22fc9d502..6b7068bd8 100644 --- a/src/lighteval/tasks/prompt_manager.py +++ b/src/lighteval/tasks/prompt_manager.py @@ -26,11 +26,8 @@ from dataclasses import dataclass from enum import Enum from itertools import cycle -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING -from lighteval.models.abstract_model import LightevalModel -from lighteval.models.endpoints.inference_providers_model import 
InferenceProvidersClient -from lighteval.models.litellm_model import LiteLLMClient from lighteval.tasks.requests import Doc from lighteval.utils.utils import as_list @@ -43,284 +40,124 @@ class PromptManager: - def __init__(self, task: "LightevalTask", lm: LightevalModel): - self.model = lm - self.task = task - self.few_shot_sampler = FewShotSampler(task) + def __init__(self, use_chat_template: bool = False, tokenizer=None, system_prompt: str | None = None): + self.use_chat_template = use_chat_template + self.tokenizer = tokenizer + self.system_prompt = system_prompt # System prompt to be used in chat templates + + def prepare_prompt(self, doc: Doc) -> str: + """Prepare a prompt from a document, either using chat template or plain text format.""" + if self.use_chat_template: + return self._prepare_chat_template(doc) + else: + return self._prepare_plain_text(doc) - @staticmethod - def doc_to_text(doc: Doc, return_instructions: bool = False) -> Union[str, Tuple[str, str]]: - """ - Returns the query of the document without the instructions. If the - document has instructions, it removes them from the query: + def prepare_prompt_multimodal(self, doc: Doc) -> str: + if self.use_chat_template is False or self.tokenizer is None: + raise ValueError("Multimodal prompts are only supported with chat template format.") - Args: - doc (Doc): document class, containing the query and the - instructions. + if doc.images is None: + raise ValueError("Multimodal prompts require images to be provided in the document.") - Returns: - str: Query of the document without the instructions. - """ - instructions = doc.instruction if doc.instruction is not None else "" - if not doc.query.startswith(instructions): - raise ValueError(f"Prompt query {doc.query} is not starting with instruction {instructions}") + text_content = [{"type": "text", "text": doc.query}] + image_content = [{"type": "image", "image": image} for image in doc.images] + message = {"role": "user", "content": text_content + image_content} - return ( - (doc.query[len(instructions) :], instructions) if return_instructions else doc.query[len(instructions) :] - ) + if ( + self.system_prompt is not None or doc.instruction is not None + ): # We add system prompt and instruction jointly if possible + system_prompt = self.system_prompt if self.system_prompt is not None else "" + instruction = doc.instruction if doc.instruction is not None else "" + system_content = [{"type": "text", "text": system_prompt + instruction}] + system_prompt_message = {"role": "system", "content": system_content} + message = [system_prompt_message, message] - @staticmethod - def doc_to_target(formatted_doc: Doc) -> str: - """ - Returns the target of the given document. + else: + message = [message] - Args: - formatted_doc (Doc): Formatted document. + return self.tokenizer.apply_chat_template( + message, + tokenize=False, + add_generation_prompt=True, + ) - Returns: - str: Target of the document, which is the correct answer for a document. + def prepare_prompt_api(self, doc: Doc) -> list[dict[str, str]]: """ - return as_list(formatted_doc.get_golds())[0] - - @staticmethod - def doc_to_fewshot_sorting_class(formatted_doc: Doc) -> str: + Prepare a prompt for API calls, using a chat-like format. + Will not tokenize the message because APIs will usually handle this. """ - In some cases, when selecting few-shot samples, we want to use specific document classes - which need to be specified separately from the target. 
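The rewritten PromptManager is no longer bound to a model or a task; it only needs to know whether to use a chat template, which tokenizer to apply it with, and an optional system prompt, and it works directly on the Doc. A sketch of the plain-text path (this assumes fewshot_samples defaults to an empty list on Doc):

from lighteval.tasks.prompt_manager import PromptManager
from lighteval.tasks.requests import Doc

doc = Doc(
    task_name="demo_task",
    query="Question: What is 2 + 2?\nAnswer:",
    choices=["4"],
    gold_index=0,
)

pm = PromptManager(use_chat_template=False, system_prompt="You are a careful assistant.")
prompt = pm.prepare_prompt(doc)   # system prompt, then any few-shot examples, then the query
print(prompt)
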
- For example, a document where the gold is a json might want to use only one of the keys of - the json to define sorting classes in few shot samples. Else we take the gold. - - Args: - formatted_doc (Doc): Formatted document. - - Returns: - str: Class of the fewshot document - """ - return formatted_doc.fewshot_sorting_class or PromptManager.doc_to_target(formatted_doc) - - def add_context_to_doc( - self, - doc: Doc, - num_fewshot: int, - seed: int, - sampler: Optional[random.Random] = None, - truncate_few_shots: bool = False, - use_chat_template=False, - system_prompt: str = None, - cot_prompt: str = None, - ) -> Doc: - is_multi_turn = doc.specific is not None and len(doc.specific.get("multi_turn_queries", [])) > 0 - if is_multi_turn: - ctx, num_effective_few_shots = self._multi_turn_contexts(doc, use_chat_template, system_prompt) - doc.specific["multi_turn_queries_context"] = ctx - else: - ctx, num_effective_few_shots = self._single_turn_context( - doc=doc, - num_fewshot=num_fewshot, - seed=seed, - truncate_few_shots=truncate_few_shots, - sampler=sampler, - use_chat_template=use_chat_template, - system_prompt=system_prompt, - cot_prompt=cot_prompt, - ) - doc.num_effective_few_shots = num_effective_few_shots - doc.num_asked_few_shots = num_fewshot - doc.ctx = ctx - - return doc - - def _multi_turn_contexts(self, doc: Doc, use_chat_template: bool, system_prompt: Optional[str]) -> list[str]: - """Creates N contexts (depending on the number of turn) for a tasks. - Multi turn tasks need use chat templating. + return self._prepare_chat_template(doc, tokenize=False) - Args: - doc (Doc): Formatted document. - use_chat_template (bool): wether or not to use chat template. Will fail if false. - system_prompt (Optional[str]): The system prompt to use - tokenizer (PreTrainedTokenizer): The tokenizer used for the chat template + def _prepare_chat_template(self, doc: Doc, tokenize: bool = True) -> str: + """Prepare prompt using chat template format.""" + messages = [] + instruction_used = False # Flag to check if instruction is used in the first few-shot example - Raises: - ValueError: If use_chat_template is set to false. + # Add system prompt if available + if self.system_prompt is not None: + messages.append({"role": "system", "content": self.system_prompt}) - Returns: - list[str]: contexts for every turn - """ - if not use_chat_template: - raise ValueError("You need to use the chat template to create multi turn contexts") - - role_content_list = [] - if system_prompt is not None: - role_content_list.append({"role": "system", "content": system_prompt}) - - for i in doc.specific["multi_turn_queries"]: - role_content_list.append({"role": "user", "content": i}) - role_content_list.append({"role": "assistant", "content": "{model_response}"}) - role_content_list.pop(-1) - - contexts = [] - offset = 2 if system_prompt is not None else 1 - for i in range(0, len(role_content_list), offset + 1): - c = self.model.tokenizer.apply_chat_template( - role_content_list[: i + offset], add_generation_prompt=True, tokenize=False, add_special_tokens=False - ) - contexts.append(c) - - return contexts, 0 - - def _single_turn_context( - self, - doc: Doc, - num_fewshot: int, - seed: int, - sampler: Optional[random.Random] = None, - truncate_few_shots: bool = False, - use_chat_template=False, - system_prompt: str = None, - cot_prompt: str = None, - ): - """Returns a fewshot context string that is made up of a prepended description - (if provided), the `num_fewshot` number of examples, and an appended prompt example. 
- - :param doc: str - The document as returned from training_docs, validation_docs, or test_docs - should be preformatted. - :param num_fewshot: int - The number of fewshot examples to provide in the returned context string. - :param seed: seed - The random seed used to randomly sample examples. If -1, no shuffling will occur, and the samples taken - will be the `num_fewshot` firsts of the set. - :returns: str - The fewshot context. - """ - if use_chat_template and self.model.tokenizer is None: - raise Exception("You can't use a chat template if your model does not have a tokenizer") + # Add few-shot examples + for ix, fewshot_sample in enumerate(doc.fewshot_samples): + query = self._extract_query(fewshot_sample.query, fewshot_sample.instruction) + if ix == 0 and doc.instruction is not None: + instruction_used = True + query = doc.instruction + query - example, instruction = self.doc_to_text(doc, return_instructions=True) + messages.append({"role": "user", "content": query}) + messages.append({"role": "assistant", "content": fewshot_sample.get_golds()[0]}) - fewshot_ex = self.few_shot_sampler.sample_fewshot_examples( - num_fewshot=num_fewshot, formatted_doc=doc, variance_seed=seed, sampler=sampler - ) + # Add main query + main_query = self._extract_query(doc.query, doc.instruction) - num_effective_fewshots = num_fewshot + if doc.instruction is not None and not instruction_used: + # If instruction is provided, prepend it to the main query + main_query = doc.instruction + main_query - output = self.get_examples( - example=example, - instruction=instruction, - fewshot_ex=fewshot_ex, - system_prompt=system_prompt, - use_chat_template=use_chat_template, - cot_prompt=cot_prompt, - doc=doc, - ) + messages.append({"role": "user", "content": main_query}) - if truncate_few_shots and doc.images is not None: - raise NotImplementedError("Few shot evaluation is not supported for multi-modal tasks yet.") + if tokenize: # for local models + assert self.tokenizer is not None, "Tokenizer must be set for chat template formatting." 
- # If we need to truncate few-shots to fit in the context - if truncate_few_shots and self.model.max_length is not None and self.model.tokenizer is not None: - if not use_chat_template: - toks = self.model.tok_encode(output) - else: - toks = [self.model.tok_encode(msg["content"]) for msg in output] - toks = [t for ts in toks for t in ts] - - # If self.generation_size is None, the maximum allowed generation size depends - # on the model maximum context length, not on the task - we don't take it into account here - # but we probably should - gen_size = self.task.generation_size if self.task.generation_size is not None else 0 - - while len(toks) + gen_size > self.model.max_length and num_effective_fewshots >= 0: - num_effective_fewshots -= 1 - output = self.get_examples( - example=example, - instruction=instruction, - fewshot_ex=fewshot_ex[:num_effective_fewshots], - system_prompt=system_prompt, - use_chat_template=use_chat_template, - cot_prompt=cot_prompt, - ) - if not use_chat_template: - toks = self.model.tok_encode(output) - else: - toks = [self.model.tok_encode(msg["content"]) for msg in output] - toks = [t for ts in toks for t in ts] - - if type(self.model) in [LiteLLMClient, InferenceProvidersClient]: - return output, num_effective_fewshots - - elif use_chat_template: - return self.model.tokenizer.apply_chat_template( - output, tokenize=False, add_generation_prompt=True - ), num_effective_fewshots - - return output, num_effective_fewshots - - def get_examples( # noqa: C901 - self, - example: str, - instruction: Union[str | None], - fewshot_ex: list[str], - system_prompt: Union[str | None], - use_chat_template: bool, - cot_prompt: Union[str | None], - doc: Doc, - ): - is_multimodal = doc.images is not None + return self.tokenizer.apply_chat_template( + messages, + tokenize=False, + add_generation_prompt=True, + ) - if is_multimodal and not use_chat_template: - raise NotImplementedError("Multi-modal tasks do not support formatting without chat template yet.") + else: # for apis + return messages - if is_multimodal and fewshot_ex: - raise NotImplementedError("Multi-modal tasks do not support fewshot evaluation yet.") + def _prepare_plain_text(self, doc: Doc) -> str: + """Prepare prompt using plain text format.""" + parts = [] - content = example + cot_prompt if cot_prompt is not None else example + # Add system prompt if available + if self.system_prompt is not None: + parts.append(self.system_prompt) - if is_multimodal: - text_content = [{"type": "text", "text": content}] - image_content = [{"type": "image", "image": image} for image in doc.images] - message = {"role": "user", "content": text_content + image_content} + if doc.instruction is not None: + parts.append(doc.instruction) - if ( - system_prompt is not None or instruction is not None - ): # We add system prompt and instruction jointly if possible - system_prompt = system_prompt if system_prompt is not None else "" - instruction = instruction if instruction is not None else "" - system_content = [{"type": "text", "text": system_prompt + instruction}] - system_prompt_message = {"role": "system", "content": system_content} - return [system_prompt_message, message] + # Add few-shot examples + for fewshot_sample in doc.fewshot_samples: + query = self._extract_query(fewshot_sample.query, fewshot_sample.instruction) + parts.append(query + " " + fewshot_sample.get_golds()[0].strip()) - return [message] + # Add main query + query = self._extract_query(doc.query, doc.instruction) + parts.append(query) - # Regular text (not multimodal) - 
examples = [] + return "\n\n".join(parts) - # Few shot examples - for ex in fewshot_ex: - if use_chat_template: - examples.append({"role": "user", "content": self.doc_to_text(ex, return_instructions=False)}) - examples.append({"role": "assistant", "content": self.doc_to_target(ex)}) + def _extract_query(self, query: str, instruction: str | None) -> str: + """Extract query content, removing instruction prefix if appropriate.""" + if instruction is not None: + if query.startswith(instruction): + return query[len(instruction) :].strip() else: - examples.append(self.doc_to_text(ex, return_instructions=False) + self.doc_to_target(ex)) - - # Actual example - if use_chat_template: - examples.append({"role": "user", "content": content}) - else: - examples.append(content) - - # System prompt and instruction - if use_chat_template: - # We add the instruction to the first example - examples[0]["content"] = instruction + examples[0]["content"] - if system_prompt is not None: # We add system prompt if available - examples.insert(0, {"role": "system", "content": system_prompt}) - return examples - else: - system_prompt = system_prompt if system_prompt is not None else "" - output = system_prompt + instruction + "\n\n".join(examples) - if output == "\n\n": - return "" - return output + return query + return query @dataclass @@ -363,9 +200,9 @@ def sample_fewshot_examples( self, num_fewshot: int, variance_seed: int, - sampler: random.Random = None, - formatted_doc: Doc = None, - ): + sampler: random.Random | None = None, + formatted_doc: Doc | None = None, + ) -> list[Doc]: if num_fewshot == 0: return [] @@ -433,7 +270,7 @@ def _init_fewshot_sampling_balanced( # (or the gold target, if the class is undefined) label_to_instances = defaultdict(list) for instance in fewshotpool: - target = PromptManager.doc_to_fewshot_sorting_class(instance) + target = instance.fewshot_sorting_class or as_list(instance.get_golds())[0] label_to_instances[target].append(instance) # Sort by counts of class labels diff --git a/src/lighteval/tasks/registry.py b/src/lighteval/tasks/registry.py index 4fb8d9230..5ea33d942 100644 --- a/src/lighteval/tasks/registry.py +++ b/src/lighteval/tasks/registry.py @@ -21,14 +21,14 @@ # SOFTWARE. import collections +import copy import importlib import logging import os -from functools import lru_cache, partial +from functools import lru_cache from itertools import groupby from pathlib import Path from types import ModuleType -from typing import Callable, Dict, List, Optional, Union from datasets.load import dataset_module_factory @@ -62,20 +62,17 @@ TRUNCATE_FEW_SHOTS_DEFAULTS = True -LazyLightevalTask = Callable[[], LightevalTask] - - class Registry: """ The Registry class is used to manage the task registry and get task classes. """ - def __init__(self, cache_dir: Optional[str] = None, custom_tasks: Optional[Union[str, Path, ModuleType]] = None): + def __init__(self, custom_tasks: str | Path | ModuleType | None = None): """ Initialize the Registry class. + Registry is responsible for holding a dict of task and their config, initializing a LightevalTask instance when asked. Args: - cache_dir (Optional[str]): Directory path for caching. Defaults to None. custom_tasks (Optional[Union[str, Path, ModuleType]]): Custom tasks to be included in registry. Can be a string path, Path object, or a module. Each custom task should be a module with a TASKS_TABLE exposing a list of LightevalTaskConfig. 
E.g: @@ -87,49 +84,53 @@ def __init__(self, cache_dir: Optional[str] = None, custom_tasks: Optional[Union ) ] """ - - # Private attributes, not expected to be mutated after initialization - self._cache_dir = cache_dir self._custom_tasks = custom_tasks - def get_task_instance(self, task_name: str): - """ - Get the task class based on the task name (suite|task). - - Args: - task_name (str): Name of the task (suite|task). - Returns: - LightevalTask: Task class. + def get_tasks_from_configs(self, task_configs: list[LightevalTaskConfig]) -> dict[str, LightevalTask]: + return {f"{config.full_name}": LightevalTask(config=config) for config in task_configs} - Raises: - ValueError: If the task is not found in the task registry or custom task registry. + def get_tasks_configs(self, task: str) -> list[LightevalTaskConfig]: """ - task_class = self.task_registry.get(task_name) - if task_class is None: - logger.error(f"{task_name} not found in provided tasks") - raise ValueError(f"Cannot find tasks {task_name} in task list or in custom task registry)") + task is a string of the form "suite|task|few_shot|truncate_few_shots,suite|task|few_shot|truncate_few_shots" - return task_class() + returns a LightevalTaskConfig object based on the task name and fewshot and truncate_few_shots values. + """ + task_info_dict = self.taskinfo_selector(task) + configs = [] + + for task_name, task_info in task_info_dict.items(): + # We can have multiple few_shot and truncate_few_shots values for the same task + for task_info_dict in task_info: + config = self.task_registry.get(task_name) + if config is not None: + config = copy.deepcopy(config) + config.num_fewshots = task_info_dict["fewshots"] + config.truncate_fewshots = task_info_dict["truncate_fewshots"] + config.full_name = f"{task_name}|{config.num_fewshots}" + configs.append(config) + else: + raise ValueError(f"Cannot find task {task_name} in task list or in custom task registry") + + return configs @property @lru_cache - def task_registry(self): + def task_registry(self) -> dict[str, LightevalTaskConfig]: """ Returns: dict[str, LazyLightevalTask]: A dictionary mapping task names (suite|task) to their corresponding LightevalTask classes. Example: { - "lighteval|arc_easy": lambda: LightevalTask(name="lighteval|arc_easy", ...) 
+ "lighteval|arc_easy": LightevalTaskConfig(name="arc_easy", suite="lighteval", ...), } """ - - # Import custom tasks provided by the user custom_tasks_registry = {} custom_tasks_module = [] - TASKS_TABLE = [] + custom_task_configs = [] + if self._custom_tasks is not None: - custom_tasks_module.append(create_custom_tasks_module(custom_tasks=self._custom_tasks)) + custom_tasks_module.append(Registry.create_custom_tasks_module(custom_tasks=self._custom_tasks)) if can_load_extended_tasks(): for extended_task_module in AVAILABLE_EXTENDED_TASKS_MODULES: custom_tasks_module.append(extended_task_module) @@ -137,14 +138,14 @@ def task_registry(self): logger.warning(CANNOT_USE_EXTENDED_TASKS_MSG) for module in custom_tasks_module: - TASKS_TABLE.extend(module.TASKS_TABLE) - # We don't log the tasks themselves as it makes the logs unreadable + custom_task_configs.extend(module.TASKS_TABLE) logger.info(f"Found {len(module.TASKS_TABLE)} custom tasks in {module.__file__}") - if len(TASKS_TABLE) > 0: - custom_tasks_registry = create_lazy_tasks(meta_table=TASKS_TABLE, cache_dir=self._cache_dir) + if len(custom_task_configs) > 0: + custom_tasks_registry = Registry.create_task_config_dict(meta_table=custom_task_configs) + + default_tasks_registry = Registry.create_task_config_dict() - default_tasks_registry = create_lazy_tasks(cache_dir=self._cache_dir) # Check the overlap between default_tasks_registry and custom_tasks_registry intersection = set(default_tasks_registry.keys()).intersection(set(custom_tasks_registry.keys())) if len(intersection) > 0: @@ -152,9 +153,74 @@ def task_registry(self): f"Following tasks ({intersection}) exists both in the default and custom tasks. Will use the custom ones on conflict." ) - # Custom tasks overwrite defaults tasks return {**default_tasks_registry, **custom_tasks_registry} + def taskinfo_selector(self, tasks: str) -> dict[str, list[dict]]: + """ + Converts a input string of tasks name to task information usable by lighteval. + + Args: + tasks (str): A string containing a comma-separated list of tasks definitions in the + format: "task_definition", where it can be + containing a list of tasks. + where task_definition can be: + - path to a file containing a list of tasks (one per line) + - task group defined in TASKS_GROUPS dict in custom tasks file + - task name with few shot in format "suite|task|few_shot|truncate_few_shots" + - task superset in format "suite|task_superset|few_shot|truncate_few_shots" (superset will run all tasks with format "suite|task_superset:{subset}|few_shot|truncate_few_shots") + + + Returns: + tuple[list[str], dict[str, list[tuple[int, bool]]]]: A tuple containing: + - A sorted list of unique task names in the format "suite|task". + - A dictionary mapping each task name to a list of tuples representing the few_shot and truncate_few_shots values. 
+ """ + few_shot_dict = collections.defaultdict(list) + + # We can provide a path to a file with a list of tasks or a string of comma-separated tasks + if os.path.exists(tasks): + with open(tasks, "r") as f: + tasks_list = [line.strip() for line in f if line.strip() and not line.startswith("#")] + else: + tasks_list = tasks.split(",") + + # At this point the strings are either task name/superset name or group names + # Here we deal with group names and map them to corresponding tasks + expanded_tasks_list: list[str] = [] + for maybe_task_group in tasks_list: + # We either expand the group (in case it's a group name), or we keep it as is (in case it's a task name or superset name) + expanded_tasks = self.task_groups_dict.get(maybe_task_group, [maybe_task_group]) + if len(expanded_tasks) > 1: + logger.info(f"Expanding task group {maybe_task_group} to {expanded_tasks}") + expanded_tasks_list.extend(expanded_tasks) + + for task in expanded_tasks_list: + try: + suite_name, task_name, few_shot, truncate_few_shots = tuple(task.split("|")) + truncate_few_shots = int(truncate_few_shots) + except ValueError: + raise ValueError( + f"Cannot get task info from {task}. correct format is suite|task|few_shot|truncate_few_shots" + ) + + if truncate_few_shots not in [0, 1]: + raise ValueError(f"TruncateFewShots must be 0 or 1, got {truncate_few_shots}") + + truncate_few_shots = bool(truncate_few_shots) + few_shot = int(few_shot) + + if suite_name not in DEFAULT_SUITES: + logger.warning( + f"Suite {suite_name} unknown. This is not normal, unless you are testing adding new evaluations." + ) + + # This adds support for task supersets (eg: mmlu -> all the mmlu tasks) + for expanded_task in self.expand_task_definition(f"{suite_name}|{task_name}"): + # Store few_shot info for each task name (suite|task) + few_shot_dict[expanded_task].append({"fewshots": few_shot, "truncate_fewshots": truncate_few_shots}) + + return few_shot_dict + @property @lru_cache def _task_superset_dict(self): @@ -188,7 +254,7 @@ def task_groups_dict(self) -> dict[str, list[str]]: """ if self._custom_tasks is None: return {} - custom_tasks_module = create_custom_tasks_module(custom_tasks=self._custom_tasks) + custom_tasks_module = Registry.create_custom_tasks_module(custom_tasks=self._custom_tasks) tasks_group_dict = {} if hasattr(custom_tasks_module, "TASKS_GROUPS"): tasks_group_dict = custom_tasks_module.TASKS_GROUPS @@ -196,22 +262,6 @@ def task_groups_dict(self) -> dict[str, list[str]]: # We should allow defining task groups as comma-separated strings or lists of tasks return {k: v if isinstance(v, list) else v.split(",") for k, v in tasks_group_dict.items()} - def get_task_dict(self, task_names: list[str]) -> dict[str, LightevalTask]: - """ - Get a dictionary of tasks based on the task name list (suite|task). - - Args: - task_name_list (List[str]): A list of task names (suite|task). - - Returns: - Dict[str, LightevalTask]: A dictionary containing the tasks. - - Notes: - - Each task in the task_name_list will be instantiated with the corresponding task class. 
- """ - # Select relevant tasks given the subset asked for by the user - return {task_name: self.get_task_instance(task_name) for task_name in task_names} - def expand_task_definition(self, task_definition: str): """ Args: @@ -243,120 +293,51 @@ def print_all_tasks(self): for task_name in tasks_names: print(f" - {task_name}") + @staticmethod + def create_custom_tasks_module(custom_tasks: str | Path | ModuleType) -> ModuleType: + """Creates a custom task module to load tasks defined by the user in their own file. -def create_custom_tasks_module(custom_tasks: Union[str, Path, ModuleType]) -> ModuleType: - """Creates a custom task module to load tasks defined by the user in their own file. - - Args: - custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module itself - - Returns: - ModuleType: The newly imported/created custom tasks modules - """ - if isinstance(custom_tasks, ModuleType): - return custom_tasks - if isinstance(custom_tasks, (str, Path)) and os.path.exists(custom_tasks): - dataset_module = dataset_module_factory(str(custom_tasks), trust_remote_code=True) - return importlib.import_module(dataset_module.module_path) - if isinstance(custom_tasks, (str, Path)): - return importlib.import_module(str(custom_tasks)) - raise ValueError(f"Cannot import custom tasks from {custom_tasks}") - - -def taskinfo_selector(tasks: str, task_registry: Registry) -> tuple[list[str], dict[str, list[tuple[int, bool]]]]: - """ - Converts a input string of tasks name to task information usable by lighteval. - - Args: - tasks (str): A string containing a comma-separated list of tasks definitions in the - format "task_definition|few_shot|truncate_few_shots" or a path to a file - containing a list of tasks. - where task_definition can be: - - path to a file containing a list of tasks (one per line) - - task group defined in TASKS_GROUPS dict in custom tasks file - - task name with few shot in format "suite|task|few_shot|truncate_few_shots" - - task superset in format "suite|task_superset|few_shot|truncate_few_shots" (superset will run all tasks with format "suite|task_superset:{subset}|few_shot|truncate_few_shots") - - - Returns: - tuple[list[str], dict[str, list[tuple[int, bool]]]]: A tuple containing: - - A sorted list of unique task names in the format "suite|task". - - A dictionary mapping each task name to a list of tuples representing the few_shot and truncate_few_shots values. - """ - few_shot_dict = collections.defaultdict(list) - - # We can provide a path to a file with a list of tasks or a string of comma-separated tasks - if "." 
in tasks and os.path.exists(tasks): - with open(tasks, "r") as f: - tasks_list = [line.strip() for line in f if line.strip() and not line.startswith("#")] - else: - tasks_list = tasks.split(",") - - # At this point the strings are either task name/superset name or group names - # Here we deal with group names and map them to corresponding tasks - expanded_tasks_list: list[str] = [] - for maybe_task_group in tasks_list: - # We either expand the group (in case it's a group name), or we keep it as is (in case it's a task name or superset name) - expanded_tasks = task_registry.task_groups_dict.get(maybe_task_group, [maybe_task_group]) - expanded_tasks_list.extend(expanded_tasks) - - for task in expanded_tasks_list: - try: - suite_name, task_name, few_shot, truncate_few_shots = tuple(task.split("|")) - truncate_few_shots = int(truncate_few_shots) - except ValueError: - raise ValueError( - f"Cannot get task info from {task}. correct format is suite|task|few_shot|truncate_few_shots" - ) - - if truncate_few_shots not in [0, 1]: - raise ValueError(f"TruncateFewShots must be 0 or 1, got {truncate_few_shots}") - - truncate_few_shots = bool(truncate_few_shots) - few_shot = int(few_shot) - - if suite_name not in DEFAULT_SUITES: - logger.warning( - f"Suite {suite_name} unknown. This is not normal, unless you are testing adding new evaluations." - ) - - # This adds support for task supersets (eg: mmlu -> all the mmlu tasks) - for expanded_task in task_registry.expand_task_definition(f"{suite_name}|{task_name}"): - # Store few_shot info for each task name (suite|task) - few_shot_dict[expanded_task].append((few_shot, truncate_few_shots)) - - return sorted(few_shot_dict.keys()), {k: list(set(v)) for k, v in few_shot_dict.items()} - - -def create_lazy_tasks( - meta_table: Optional[List[LightevalTaskConfig]] = None, cache_dir: Optional[str] = None -) -> Dict[str, LazyLightevalTask]: - """ - Create configuration tasks based on the provided meta_table. - - Args: - meta_table: meta_table containing tasks - configurations. If not provided, it will be loaded from TABLE_PATH. - cache_dir: Directory to store cached data. If not - provided, the default cache directory will be used. + Args: + custom_tasks (Optional[Union[str, ModuleType]]): Path to the custom tasks file or name of a module to import containing custom tasks or the module itself - Returns: - Dict[str, LightevalTask]: A dictionary of task names mapped to their corresponding LightevalTask classes. - """ + Returns: + ModuleType: The newly imported/created custom tasks modules + """ + if isinstance(custom_tasks, ModuleType): + return custom_tasks + if isinstance(custom_tasks, (str, Path)) and os.path.exists(custom_tasks): + dataset_module = dataset_module_factory(str(custom_tasks), trust_remote_code=True) + return importlib.import_module(dataset_module.module_path) + if isinstance(custom_tasks, (str, Path)): + return importlib.import_module(str(custom_tasks)) + + @staticmethod + def create_task_config_dict(meta_table: list[LightevalTaskConfig] | None = None) -> dict[str, LightevalTaskConfig]: + """ + Create configuration tasks based on the provided meta_table. - if meta_table is None: - meta_table = [config for config in vars(default_tasks).values() if isinstance(config, LightevalTaskConfig)] + Args: + meta_table: meta_table containing tasks + configurations. If not provided, it will be loaded from TABLE_PATH. + cache_dir: Directory to store cached data. If not + provided, the default cache directory will be used. 
- tasks_with_config: dict[str, LightevalTaskConfig] = {} - # Every task is renamed suite|task, if the suite is in DEFAULT_SUITE - for config in meta_table: - if not any(suite in config.suite for suite in DEFAULT_SUITES): - logger.warning( - f"This evaluation is not in any known suite: {config.name} is in {config.suite}, not in {DEFAULT_SUITES}. Skipping." - ) - continue - for suite in config.suite: - if suite in DEFAULT_SUITES: - tasks_with_config[f"{suite}|{config.name}"] = config + Returns: + Dict[str, LightevalTask]: A dictionary of task names mapped to their corresponding LightevalTask classes. + """ - return {task: partial(LightevalTask, task, cfg, cache_dir=cache_dir) for task, cfg in tasks_with_config.items()} + if meta_table is None: + meta_table = [config for config in vars(default_tasks).values() if isinstance(config, LightevalTaskConfig)] + + tasks_with_config: dict[str, LightevalTaskConfig] = {} + for config in meta_table: + if not any(suite in config.suite for suite in DEFAULT_SUITES): + logger.warning( + f"This evaluation is not in any known suite: {config.name} is in {config.suite}, not in {DEFAULT_SUITES}. Skipping." + ) + continue + for suite in config.suite: + if suite in DEFAULT_SUITES: + tasks_with_config[f"{suite}|{config.name}"] = config + + return tasks_with_config diff --git a/src/lighteval/tasks/requests.py b/src/lighteval/tasks/requests.py index 733040406..8829510b2 100644 --- a/src/lighteval/tasks/requests.py +++ b/src/lighteval/tasks/requests.py @@ -21,11 +21,9 @@ # SOFTWARE. import json -from dataclasses import asdict, dataclass -from enum import Enum, auto -from typing import TYPE_CHECKING, NamedTuple, Optional, Union - -from huggingface_hub import TextGenerationInputGrammarType +from dataclasses import asdict, dataclass, field +from enum import Enum +from typing import TYPE_CHECKING, Union from lighteval.utils.utils import as_list @@ -34,177 +32,198 @@ from PIL.Image import Image -class RequestType(Enum): - LOGLIKELIHOOD = auto() - LOGLIKELIHOOD_SINGLE_TOKEN = auto() - LOGLIKELIHOOD_ROLLING = auto() - GREEDY_UNTIL = auto() - GREEDY_UNTIL_MULTI_TURN = auto() - - -@dataclass -class Request: - """ - Represents a request for a specific task, example and request within that - example in the evaluation process. - For example in the task "boolq", the example "Is the sun hot?" and the - requests for that example "Is the sun hot? Yes" and "Is the sun hot? No". - - Attributes: - task_name (str): The name of the task. - sample_index (int): The index of the example. - request_index (int): The index of the request. - context (str): The context for the request. - metric_categories (list[MetricCategory]): All the metric categories which concern this request - """ - - task_name: str - sample_index: int - request_index: int - context: str - metric_categories: list["MetricCategory"] # noqa F821 - - -@dataclass -class LoglikelihoodRequest(Request): - """ - Represents a request for log-likelihood evaluation. - - Attributes: - choice (str): The choice to evaluate the log-likelihood for. - request_type (RequestType): The type of the request (LOGLIKELIHOOD). - """ - - choice: str - request_type = RequestType.LOGLIKELIHOOD - tokenized_context: list[int] = None - tokenized_continuation: list[int] = None - images: Optional[list["Image"]] = None - - -@dataclass -class LoglikelihoodSingleTokenRequest(Request): - """ - Represents a request for calculating the log-likelihood of a single token. - Faster because we can get all the loglikelihoods in one pass. 
- - Attributes: - choices (list[str]): The list of token choices. - request_type (RequestType): The type of the request. - """ - - choices: list[str] - request_type = RequestType.LOGLIKELIHOOD_SINGLE_TOKEN - tokenized_context: list[int] = None - tokenized_continuation: list[int] = None - images: Optional[list["Image"]] = None - - -@dataclass -class LoglikelihoodRollingRequest(Request): +class SamplingMethod(str, Enum): """ - Represents a request for log-likelihood rolling evaluation. - - Inherits from the base Request class. + Enum representing different sampling methods for text generation. """ - request_type = RequestType.LOGLIKELIHOOD_ROLLING - tokenized_context: list[int] = None - tokenized_continuation: list[int] = None - images: Optional[list["Image"]] = None - + GENERATIVE = "GENERATIVE" + LOGPROBS = "LOGPROBS" # computes logprobs of choices + PERPLEXITY = "PERPLEXITY" # computes logprobs of the whole prompt -@dataclass -class GreedyUntilRequest(Request): - """ - Represents a request for generating text using the Greedy-Until algorithm. - Attributes: - stop_sequence (str): The sequence of tokens that indicates when to stop generating text. - generation_size (int): The maximum number of tokens to generate. - generation_grammar (TextGenerationInputGrammarType): The grammar to generate completion according to. - Currently only available for TGI models. - request_type (RequestType): The type of the request, set to RequestType.GREEDY_UNTIL. +@dataclass(slots=True) +class Doc: """ + Dataclass representing a single evaluation sample for a benchmark. - stop_sequence: Union[str, tuple[str], list[str]] - generation_size: Union[int, None] - generation_grammar: Union[TextGenerationInputGrammarType, None] = None - request_type = RequestType.GREEDY_UNTIL - tokenized_context: list[int] = None - num_samples: int = None - do_sample: bool = False - use_logits: bool = False - images: Optional[list["Image"]] = None + This class encapsulates all the information needed to evaluate a model on a single + task instance. It contains the input query, expected outputs, metadata, and + configuration parameters for different types of evaluation tasks. + **Required Fields:** + - `query`: The input prompt or question + - `choices`: Available answer choices (for multiple choice tasks) + - `gold_index`: Index(es) of the correct answer(s) -@dataclass -class GreedyUntilMultiTurnRequest(Request): - """ - Represents a request for generating text using the Greedy-Until algorithm. + **Optional Fields:** + - `instruction`: System prompt, task specific. Will be appended to model specific system prompt. + - `images`: Visual inputs for multimodal tasks. Attributes: - stop_sequence (str): The sequence of tokens that indicates when to stop generating text. - generation_size (int): The maximum number of tokens to generate. - request_type (RequestType): The type of the request, set to RequestType.GREEDY_UNTIL. - """ - - stop_sequence: str - generation_size: int - request_type = RequestType.GREEDY_UNTIL_MULTI_TURN - use_logits: bool = False - images: Optional[list["Image"]] = None - - -class SampleUid(NamedTuple): - """ - Represents the identifier for an example in a task. - - Attributes: - task_name (str): The name of the task in `name|num_fewshot` format. - doc_id_seed (str): The document id with the seed used for few_shot appended at the end. 
- """ - - task_name: str - doc_id_seed: str - - -@dataclass(slots=True) -class Doc: - """ - Dataclass used to represent the content of a task example - almost every field is optional, but some tasks require some fields to be present. - When adding a new task, please add the required fields to the doc class. - Each task will have a different set of fields needed. + query (str): + The main query, prompt, or question to be sent to the model. + + choices (list[str]): + List of possible answer choices for the query. + For multiple choice tasks, this contains all options (A, B, C, D, etc.). + For generative tasks, this may be empty or contain reference answers. + + gold_index (Union[int, list[int]]): + Index or indices of the correct answer(s) in the choices list. + For single correct answers,(e.g., 0 for first choice). + For multiple correct answers, use a list (e.g., [0, 2] for first and third). + + instruction (str | None): + System prompt or task-specific instructions to guide the model. + This is typically prepended to the query to set context or behavior. + + images (list["Image"] | None): + List of PIL Image objects for multimodal tasks. + + specific (dict | None): + Task-specific information or metadata. + Can contain any additional data needed for evaluation. + + unconditioned_query (Optional[str]): + Query without task-specific context for PMI normalization. + Used to calculate: log P(choice | Query) - log P(choice | Unconditioned Query). + + original_query (str | None): + The query before any preprocessing or modification. + + # Set by task parameters + id (str): + Unique identifier for this evaluation instance. + Set by the task and not the user. + + task_name (str): + Name of the task or benchmark this Doc belongs to. + + ## Few-shot Learning Parameters + num_asked_few_shots (int): + Number of few-shot examples requested for this instance. + + num_effective_few_shots (int): + Actual number of few-shot examples used (may differ from requested). + + fewshot_samples (list): + List of Doc objects representing few-shot examples. + These examples are prepended to the main query to provide context. + + sampling_methods (list[SamplingMethod]): + List of sampling methods to use for this instance. + Options: GENERATIVE, LOGPROBS, PERPLEXITY. + + fewshot_sorting_class (Optional[str]): + Class label for balanced few-shot example selection. + Used to ensure diverse representation in few-shot examples. + + ## Generation Control Parameters + generation_size (int | None): + Maximum number of tokens to generate for this instance. + + stop_sequences (list[str] | None): + List of strings that should stop generation when encountered. + **Used for**: Controlled generation, preventing unwanted continuations. + + use_logits (bool): + Whether to return logits (raw model outputs) in addition to text. + **Used for**: Probability analysis, confidence scoring, detailed evaluation. + + num_samples (int): + Number of different samples to generate for this instance. + **Used for**: Diversity analysis, uncertainty estimation, ensemble methods. + + generation_grammar (None): + Grammar constraints for generation (currently not implemented). + **Reserved for**: Future structured generation features. + + Methods: + get_golds(): + Returns the correct answer(s) as strings based on gold_index. + Handles both single and multiple correct answers. 
+ + Usage Examples: + + **Multiple Choice Question:** + ```python + doc = Doc( + query="What is the capital of France?", + choices=["London", "Paris", "Berlin", "Madrid"], + gold_index=1, # Paris is the correct answer + instruction="Answer the following geography question:", + ) + ``` + + **Generative Task:** + ```python + doc = Doc( + query="Write a short story about a robot.", + choices=[], # No predefined choices for generative tasks + gold_index=0, # Not used for generative tasks + generation_size=100, + stop_sequences=["\n\n", "The End"], + ) + ``` + + **Few-shot Learning:** + ```python + doc = Doc( + query="Translate 'Hello world' to Spanish.", + choices=["Hola mundo", "Bonjour monde", "Ciao mondo"], + gold_index=0, + fewshot_samples=[ + Doc(query="Translate 'Good morning' to Spanish.", + choices=["Buenos días", "Bonjour", "Buongiorno"], + gold_index=0), + Doc(query="Translate 'Thank you' to Spanish.", + choices=["Gracias", "Merci", "Grazie"], + gold_index=0) + ], + ) + ``` + + **Multimodal Task:** + ```python + doc = Doc( + query="What is shown in this image?", + choices=["A cat"], + gold_index=0, + images=[pil_image], # PIL Image object + ) + ``` """ query: str choices: list[str] gold_index: Union[int, list[int]] - original_query: Optional[str] = "" # the query before preprocessing, if stored - specific: dict = None # Information which is specific to the current eval - task_name: str = "" - - # For few-shot - instruction: Optional[str] = "" - fewshot_sorting_class: Optional[str] = None # class to use to select balanced few-shot samples - - # Filled when parsing and adding the few-shot context - ctx: Optional[str] = "" - num_asked_few_shots: int = -1 - num_effective_few_shots: int = -1 + instruction: str | None = None # task prompt to use, if any + images: list["Image"] | None = None # for multimodal benchmarks + specific: dict | None = None # Information which is specific to the current eval # Uncoditioned query is used for PMI normalization, that's # log P(choice | Query) - log P(choice | Unconditioned Query) # The uncoditioned query shouldn't contain any information about the task, thus usually it's empty string or 'Answer:'. 
- unconditioned_query: Optional[str] = None + unconditioned_query: str | None = None + original_query: str | None = None # the query before preprocessing, if stored - # For multi-modal tasks - images: Optional[list["Image"]] = None + id: str = "" + task_name: str = "" - def __post_init__(self): - if self.instruction is None: - self.instruction = "" + # Fewshots parameters + num_asked_few_shots: int = 0 + num_effective_few_shots: int = 0 + fewshot_samples: list = field(default_factory=list) + sampling_methods: list[SamplingMethod] = field(default_factory=list) + fewshot_sorting_class: str | None = None # class to use to select balanced few-shot samples + + # Generation parameters + generation_size: int | None = None # number of tokens to generate for each sample + stop_sequences: list[str] | None = None + use_logits: bool = False # whether to use logits for the generation or not + num_samples: int = 1 # number of samples to generate for each sample + generation_grammar: None = None def get_golds(self): """Return gold targets extracted from the target dict""" diff --git a/tests/metrics/test_extractive_match.py b/tests/metrics/test_extractive_match.py index c3a12c813..0786e14c6 100644 --- a/tests/metrics/test_extractive_match.py +++ b/tests/metrics/test_extractive_match.py @@ -30,6 +30,7 @@ multilingual_extractive_match_metric, ) from lighteval.metrics.utils.math_comparison import sympy_expr_eq +from lighteval.models.model_output import ModelResponse from lighteval.tasks.requests import Doc from lighteval.utils.language import Language @@ -60,15 +61,17 @@ def compare_strings( extraction_targets = tuple(extraction_targets) # Convert to tuple + model_response = ModelResponse(text=[pred]) + doc = Doc(choices=[gold, "", "", ""], query="", gold_index=0) + return multilingual_extractive_match_metric( language=language, gold_extraction_target=extraction_targets, pred_extraction_target=extraction_targets, precision=precision, ).sample_level_fn( - golds=[gold], - predictions=[pred], - formatted_doc=Doc(choices=["", "", "", ""], query="", gold_index=0), + model_response=model_response, + doc=doc, ) diff --git a/tests/metrics/test_metric_requests.py b/tests/metrics/test_metric_requests.py index 6635114f3..b748f7363 100644 --- a/tests/metrics/test_metric_requests.py +++ b/tests/metrics/test_metric_requests.py @@ -24,7 +24,7 @@ from lighteval.metrics.metrics import Metrics from lighteval.metrics.normalizations import LogProbPMINorm from lighteval.metrics.utils.metric_utils import Metric -from lighteval.models.model_output import GenerativeResponse, LoglikelihoodResponse +from lighteval.models.model_output import ModelResponse from lighteval.tasks.default_tasks import xstory_cloze_en_lighteval from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig from lighteval.tasks.requests import Doc @@ -32,7 +32,7 @@ # Doesn't matter as won't be used -def dummy_prompt_fc(line, task_name: str = None): +def dummy_prompt_fc(line, task_name: str = ""): return Doc( task_name=task_name, query=line["input_sentence_1"], @@ -45,7 +45,8 @@ def dummy_prompt_fc(line, task_name: str = None): def get_pmi_task(metrics: list[Metric]): return LightevalTaskConfig( name="pmi_test_task", - metric=metrics, + metrics=metrics, + suite=["test"], prompt_function=dummy_prompt_fc, hf_repo=xstory_cloze_en_lighteval.hf_repo, hf_subset=xstory_cloze_en_lighteval.hf_subset, @@ -59,25 +60,10 @@ def test_pmi_request(): """ fake_model = FakeModel( loglikelihood_responses=[ - LoglikelihoodResponse( - result=(0.9, True), - 
generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.2, False), - generated_tokens=[0], - input_tokens=[0], - ), - # Normalization loglikehioods - LoglikelihoodResponse( - result=(0.85, True), - generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.1, False), - generated_tokens=[0], + ModelResponse( + logprobs=[0.9, 0.2, 0.85, 0.1], + argmax_logits_eq_gold=[True, False, True, False], + output_tokens=[[0]], input_tokens=[0], ), ] @@ -85,11 +71,10 @@ def test_pmi_request(): metric = loglikelihood_acc_metric(normalization=LogProbPMINorm()) pmi_test_config = get_pmi_task(metrics=[metric]) - pmi_test_config.metric = (metric,) - task = LightevalTask(pmi_test_config.name, pmi_test_config) - result = fake_evaluate_task(task, fake_model, max_samples=1) + task = LightevalTask(pmi_test_config) + result = fake_evaluate_task(task, fake_model, max_samples=1)["results"]["test:pmi_test_task:0"] # Correct choice after norm should be the second one so 0 acc - assert result[metric.metric_name][0] == 0 + assert result[metric.metric_name] == 0 def test_pmi_request_with_logprob_metric(): @@ -99,25 +84,10 @@ def test_pmi_request_with_logprob_metric(): """ fake_model = FakeModel( loglikelihood_responses=[ - LoglikelihoodResponse( - result=(0.9, True), - generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.2, False), - generated_tokens=[0], - input_tokens=[0], - ), - # Normalization loglikehioods - LoglikelihoodResponse( - result=(0.85, True), - generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.1, False), - generated_tokens=[0], + ModelResponse( + logprobs=[0.9, 0.2, 0.85, 0.1], + argmax_logits_eq_gold=[True, False, True, False], + output_tokens=[[0]], input_tokens=[0], ), ] @@ -125,11 +95,11 @@ def test_pmi_request_with_logprob_metric(): metrics = [loglikelihood_acc_metric(normalization=LogProbPMINorm()), loglikelihood_acc_metric(normalization=None)] pmi_test_config = get_pmi_task(metrics=metrics) - task = LightevalTask(pmi_test_config.name, pmi_test_config) - result = fake_evaluate_task(task, fake_model, max_samples=1) + task = LightevalTask(pmi_test_config) + result = fake_evaluate_task(task, fake_model, max_samples=1)["results"]["test:pmi_test_task:0"] # Correct choice after norm should be the second one so 0 acc - assert result[metrics[0].metric_name][0] == 0 - assert result[metrics[1].metric_name][0] == 1 + assert result[metrics[0].metric_name] == 0 + assert result[metrics[1].metric_name] == 1 def test_pmi_request_with_generative_metric(): @@ -140,32 +110,17 @@ def test_pmi_request_with_generative_metric(): """ fake_model = FakeModel( loglikelihood_responses=[ - LoglikelihoodResponse( - result=(0.9, True), - generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.2, False), - generated_tokens=[0], - input_tokens=[0], - ), - # Normalization loglikehioods - LoglikelihoodResponse( - result=(0.85, True), - generated_tokens=[0], - input_tokens=[0], - ), - LoglikelihoodResponse( - result=(0.1, False), - generated_tokens=[0], + ModelResponse( + logprobs=[0.9, 0.2, 0.85, 0.1], + argmax_logits_eq_gold=[True, False, True, False], + output_tokens=[[0]], input_tokens=[0], ), ], greedy_until_responses=[ - GenerativeResponse( - result="Hello", - generated_tokens=[0], + ModelResponse( + text=["Hello"], + output_tokens=[[0]], input_tokens=[0], ) ], @@ -173,7 +128,7 @@ def test_pmi_request_with_generative_metric(): metrics = 
[loglikelihood_acc_metric(normalization=LogProbPMINorm()), Metrics.exact_match.value] pmi_test_config = get_pmi_task(metrics=metrics) - task = LightevalTask(pmi_test_config.name, pmi_test_config) - results = fake_evaluate_task(task, fake_model, max_samples=1) - assert results[metrics[0].metric_name][0] == 0 - assert results[metrics[1].metric_name][0] == 1 + task = LightevalTask(pmi_test_config) + results = fake_evaluate_task(task, fake_model, max_samples=1)["results"]["test:pmi_test_task:0"] + assert results[metrics[0].metric_name] == 0 + assert results[metrics[1].metric_name] == 1 diff --git a/tests/models/endpoints/test_endpoint_model.py b/tests/models/endpoints/test_endpoint_model.py index d2291d17e..820a23327 100644 --- a/tests/models/endpoints/test_endpoint_model.py +++ b/tests/models/endpoints/test_endpoint_model.py @@ -42,6 +42,7 @@ class TestInferenceEndpointModelConfig: "region": "eu-west-1", "vendor": "aws", "instance_type": "nvidia-a10g", + "batch_size": 1, "instance_size": "x1", "framework": "pytorch", "endpoint_type": "protected", @@ -49,6 +50,7 @@ class TestInferenceEndpointModelConfig: "image_url": None, "env_vars": None, "add_special_tokens": True, + "system_prompt": None, "generation_parameters": { "early_stopping": None, "frequency_penalty": None, diff --git a/tests/models/endpoints/test_tgi_model.py b/tests/models/endpoints/test_tgi_model.py index ad17d3ab7..93184d5a4 100644 --- a/tests/models/endpoints/test_tgi_model.py +++ b/tests/models/endpoints/test_tgi_model.py @@ -36,6 +36,7 @@ class TestTGIModelConfig: "inference_server_address": "", "inference_server_auth": None, "model_name": None, + "system_prompt": None, "generation_parameters": { "early_stopping": None, "frequency_penalty": None, diff --git a/tests/models/test_abstract_model.py b/tests/models/test_abstract_model.py index 4066f3259..d98bbe840 100644 --- a/tests/models/test_abstract_model.py +++ b/tests/models/test_abstract_model.py @@ -29,10 +29,10 @@ def test_tok_encode_pair(): model = DummyModel(config=DummyModelConfig(seed=42)) model._tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M") context = "答案:" - continuation = "1" + continuation = ["1"] non_pairwise_tokens = model.tok_encode_pair(context, continuation, pairwise=False) pairwise_tokens = model.tok_encode_pair(context, continuation, pairwise=True) # Non-pairwise merged ":1" to one token - assert non_pairwise_tokens == ([6, 47873], [34871]) + assert non_pairwise_tokens == ([[6, 47873]], [[34871]]) # Pairwise separated ":" and "1" - assert pairwise_tokens == ([6, 47873, 13], [82]) + assert pairwise_tokens == ([[6, 47873, 13]], [[82]]) diff --git a/tests/models/test_base_model.py b/tests/models/test_base_model.py index 5593db077..7b51b3c0e 100644 --- a/tests/models/test_base_model.py +++ b/tests/models/test_base_model.py @@ -31,6 +31,5 @@ def test_empty_requests(): model: TransformersModel = load_model(config=model_config) assert model.loglikelihood([]) == [] - assert model.loglikelihood_single_token([]) == [] assert model.loglikelihood_rolling([]) == [] assert model.greedy_until([]) == [] diff --git a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json index 38e249bdb..a55fd6f82 100644 --- a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json +++ b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:5a3b8efc2168b8307c729fd3d505c7b681148ab720d4d04d711754f4e47f0669 -size 49156 +oid sha256:d38c5cdb9dd354222ccd238df2675b0999181b663322dab612655aa12f9ef372 +size 49944 diff --git a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json index ffb7159f5..7bc559c14 100644 --- a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json +++ b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba484d14fa25e113618787e6f496f7b905fa19bc60d23a390959afc89094d315 -size 49053 +oid sha256:be52fd994b9978b91eb057bb72ec6110e2e49016ca0f2b296ba5bf75ba056725 +size 49883 diff --git a/tests/slow_tests/test_accelerate_model.py b/tests/slow_tests/test_accelerate_model.py index 4e91bd194..68f4a33c0 100644 --- a/tests/slow_tests/test_accelerate_model.py +++ b/tests/slow_tests/test_accelerate_model.py @@ -38,7 +38,6 @@ # {"model_name": "gpt2", "use_chat_template": False, "revision": "main", "results_file": "tests/reference_scores/gpt2-results.json"}, { "model_name": "examples/model_configs/transformers_model.yaml", - "use_chat_template": True, "results_file": "tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json", } ] @@ -49,12 +48,11 @@ @lru_cache(maxsize=len(MODELS_ARGS)) -def run_model(model_name: str, use_chat_template: bool): +def run_model(model_name: str): """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" results = accelerate( model_args=model_name, tasks=TASKS_PATH, - use_chat_template=use_chat_template, output_dir="", dataset_loading_processes=1, save_details=False, @@ -69,7 +67,7 @@ def generate_tests() -> list[ModelInput]: tests = [] for model_args in MODELS_ARGS: - predictions_lite = partial(run_model, model_args["model_name"], model_args["use_chat_template"]) + predictions_lite = partial(run_model, model_args["model_name"]) tests.append((model_args, predictions_lite)) return tests diff --git a/tests/slow_tests/test_accelerate_vlm_model.py b/tests/slow_tests/test_accelerate_vlm_model.py index fed43edc3..f9a8edfdc 100644 --- a/tests/slow_tests/test_accelerate_vlm_model.py +++ b/tests/slow_tests/test_accelerate_vlm_model.py @@ -37,7 +37,6 @@ MODELS_ARGS = [ { "model_name": "examples/model_configs/transformers_vlm_model.yaml", - "use_chat_template": True, "results_file": "tests/reference_scores/Qwen2.5-VL-3B-Instruct-results-vlm.json", } ] @@ -47,12 +46,11 @@ @lru_cache(maxsize=len(MODELS_ARGS)) -def run_model(model_name: str, use_chat_template: bool): +def run_model(model_name: str): """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" results = accelerate( model_args=model_name, tasks=TASKS, - use_chat_template=use_chat_template, output_dir="", dataset_loading_processes=1, save_details=False, @@ -67,7 +65,7 @@ def generate_tests() -> list[ModelInput]: tests = [] for model_args in MODELS_ARGS: - predictions_lite = partial(run_model, model_args["model_name"], model_args["use_chat_template"]) + predictions_lite = partial(run_model, model_args["model_name"]) tests.append((model_args, predictions_lite)) return tests diff --git a/tests/slow_tests/test_vllm_model.py b/tests/slow_tests/test_vllm_model.py index 934c9409d..c604c011b 100644 --- a/tests/slow_tests/test_vllm_model.py +++ b/tests/slow_tests/test_vllm_model.py @@ -49,12 +49,11 @@ @lru_cache(maxsize=len(MODELS_ARGS)) -def run_model(model_name: str, 
use_chat_template: bool): +def run_model(model_name: str): """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" results = vllm( model_args=model_name, tasks=TASKS_PATH, - use_chat_template=use_chat_template, output_dir="", dataset_loading_processes=1, save_details=False, @@ -69,7 +68,7 @@ def generate_tests() -> list[ModelInput]: tests = [] for model_args in MODELS_ARGS: - predictions_lite = partial(run_model, model_args["model_name"], model_args["use_chat_template"]) + predictions_lite = partial(run_model, model_args["model_name"]) tests.append((model_args, predictions_lite)) return tests diff --git a/tests/tasks/test_lighteval_task.py b/tests/tasks/test_lighteval_task.py index 8c1bcafe7..d338b8a76 100644 --- a/tests/tasks/test_lighteval_task.py +++ b/tests/tasks/test_lighteval_task.py @@ -23,10 +23,11 @@ import pytest from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig, extract_num_samples +from lighteval.tasks.requests import Doc def dummy_prompt_function(item, task_name): - return item["text"] + return Doc(query=item["text"], choices=["A", "B"], gold_index=0, task_name=task_name) def test_revision_check(): @@ -37,11 +38,13 @@ def test_revision_check(): hf_repo="lighteval-tests-datasets/dataset-test-1", hf_subset="default", evaluation_splits=["train"], - metric=[], + metrics=[], hf_revision="25175defadfde48b131b7cd7573ad6f59f868306", ) - task_with_revision = LightevalTask("test_task_revision", cfg_with_revision) - assert task_with_revision.eval_docs() == ["hi", "how are you?"] + task_with_revision = LightevalTask(cfg_with_revision) + docs = task_with_revision.eval_docs() + queries = [doc.query for doc in docs] + assert queries == ["hi", "how are you?"] def test_dataset_filter(): @@ -53,14 +56,14 @@ def test_dataset_filter(): hf_repo="lighteval-tests-datasets/dataset-test-1", hf_subset="default", hf_filter=lambda x: x["text"] == "hi", - metric=[], + metrics=[], evaluation_splits=["train"], ) - task = LightevalTask("test_task", cfg) + task = LightevalTask(cfg) filtered_docs = task.eval_docs() assert len(filtered_docs) == 1 - assert filtered_docs[0] == "hi" + assert filtered_docs[0].query == "hi" @pytest.mark.parametrize( diff --git a/tests/tasks/test_registry.py b/tests/tasks/test_registry.py index 7fc1ec088..0f51a88b5 100644 --- a/tests/tasks/test_registry.py +++ b/tests/tasks/test_registry.py @@ -23,7 +23,7 @@ import pytest from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig -from lighteval.tasks.registry import Registry, taskinfo_selector +from lighteval.tasks.registry import Registry TASKS_TABLE = [ @@ -34,7 +34,7 @@ hf_repo="test", hf_subset="default", evaluation_splits=["train"], - metric=[], + metrics=[], ) ] @@ -49,11 +49,13 @@ def test_custom_task_groups(): Tests that task info selector correctly handles custom task groups. 
""" registry = Registry(custom_tasks="tests.tasks.test_registry") - tasks, task_info = taskinfo_selector("zero_and_one", registry) + task_info = registry.taskinfo_selector("zero_and_one") - assert set(tasks) == {"custom|test_task_revision"} - assert all(task in task_info for task in tasks) - assert all(task_info[task] == [(1, False), (0, False)] for task in tasks) + assert set(task_info.keys()) == {"custom|test_task_revision"} + assert task_info["custom|test_task_revision"] == [ + {"fewshots": 0, "truncate_fewshots": False}, + {"fewshots": 1, "truncate_fewshots": False}, + ] def test_custom_tasks(): @@ -61,10 +63,10 @@ def test_custom_tasks(): Tests that task info selector correctly handles custom tasks. """ registry = Registry(custom_tasks="tests.tasks.test_registry") - tasks, task_info = taskinfo_selector("custom|test_task_revision|0|0", registry) + task_info = registry.taskinfo_selector("custom|test_task_revision|0|0") - assert tasks == ["custom|test_task_revision"] - assert task_info["custom|test_task_revision"] == [(0, False)] + assert list(task_info.keys()) == ["custom|test_task_revision"] + assert task_info["custom|test_task_revision"] == [{"fewshots": 0, "truncate_fewshots": False}] def test_superset_expansion(): @@ -73,10 +75,12 @@ def test_superset_expansion(): """ registry = Registry() - tasks, task_info = taskinfo_selector("lighteval|storycloze|0|0", registry) + task_info = registry.taskinfo_selector("lighteval|storycloze|0|0") - assert set(tasks) == {"lighteval|storycloze:2016", "lighteval|storycloze:2018"} - assert all(task_info[task] == [(0, False)] for task in tasks) + assert list(task_info.keys()) == ["lighteval|storycloze:2016", "lighteval|storycloze:2018"] + assert task_info["lighteval|storycloze:2016"] == [{"fewshots": 0, "truncate_fewshots": False}] and task_info[ + "lighteval|storycloze:2018" + ] == [{"fewshots": 0, "truncate_fewshots": False}] def test_superset_with_subset_task(): @@ -85,12 +89,15 @@ def test_superset_with_subset_task(): """ registry = Registry() - tasks, task_info = taskinfo_selector("original|mmlu|3|0,original|mmlu:abstract_algebra|5|0", registry) + task_info = registry.taskinfo_selector("original|mmlu|3|0,original|mmlu:abstract_algebra|5|0") # We have all mmlu tasks - assert len(tasks) == 57 + assert len(task_info.keys()) == 57 # Since it's defined twice - assert task_info["original|mmlu:abstract_algebra"] == [(5, False), (3, False)] + assert task_info["original|mmlu:abstract_algebra"] == [ + {"fewshots": 3, "truncate_fewshots": False}, + {"fewshots": 5, "truncate_fewshots": False}, + ] def test_task_group_expansion_with_subset_expansion(): @@ -99,9 +106,9 @@ def test_task_group_expansion_with_subset_expansion(): """ registry = Registry(custom_tasks="tests.tasks.test_registry") - tasks = taskinfo_selector("all_mmlu", registry)[0] + task_info = registry.taskinfo_selector("all_mmlu") - assert len(tasks) == 57 + assert len(task_info.keys()) == 57 def test_invalid_task_creation(): @@ -110,7 +117,7 @@ def test_invalid_task_creation(): """ registry = Registry() with pytest.raises(ValueError): - registry.get_task_dict(["custom|task_revision"]) + registry.get_tasks_configs("custom|task_revision") def test_task_duplicates(): @@ -119,10 +126,9 @@ def test_task_duplicates(): """ registry = Registry() - tasks, task_info = taskinfo_selector("custom|test_task_revision|0|0,custom|test_task_revision|0|0", registry) + task_info = registry.taskinfo_selector("custom|test_task_revision|0|0,custom|test_task_revision|0|0") - assert tasks == 
["custom|test_task_revision"] - assert task_info["custom|test_task_revision"] == [(0, False)] + assert list(task_info.keys()) == ["custom|test_task_revision"] def test_task_creation(): @@ -130,7 +136,8 @@ def test_task_creation(): Tests that tasks registry correctly creates tasks """ registry = Registry() - task_info = registry.get_task_dict(["lighteval|storycloze:2016"])["lighteval|storycloze:2016"] + task_config = registry.get_tasks_configs("lighteval|storycloze:2016|0|0") + task = registry.get_tasks_from_configs(task_config)["lighteval|storycloze:2016|0"] - assert isinstance(task_info, LightevalTask) - assert task_info.name == "lighteval|storycloze:2016" + assert isinstance(task, LightevalTask) + assert task.name == "storycloze:2016" diff --git a/tests/test_prompt_manager.py b/tests/test_prompt_manager.py index 239f6fd6c..6a0c6a326 100644 --- a/tests/test_prompt_manager.py +++ b/tests/test_prompt_manager.py @@ -21,12 +21,11 @@ # SOFTWARE. import random -from collections import Counter import pytest from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig -from lighteval.tasks.prompt_manager import FewShotSampler, PromptManager +from lighteval.tasks.prompt_manager import FewShotSampler from lighteval.tasks.requests import Doc @@ -35,13 +34,13 @@ def test_fewshot_sampler(fewshot_select: str): config = LightevalTaskConfig( name="test_fewshot_task", prompt_function=lambda _, __: None, - hf_repo=None, + hf_repo="", hf_subset="default", - metric=[], + metrics=[], few_shots_split="test", few_shots_select=fewshot_select, ) - task = LightevalTask("test_fewshot_task", config) + task = LightevalTask(config) rnd = random.Random(0) task._fewshot_docs = [ Doc(str(i), ["A", "B"], rnd.randint(0, 2), fewshot_sorting_class=str(i % 20)) for i in range(100) @@ -49,10 +48,8 @@ def test_fewshot_sampler(fewshot_select: str): sampler = FewShotSampler(task) seed = 1 docs = sampler.sample_fewshot_examples(20, seed) + match task.fewshot_selection: - case "balanced": - labels = Counter([PromptManager.doc_to_fewshot_sorting_class(d) for d in docs]) - assert labels.total() / len(labels) == 1 case "sequential": assert docs == task.fewshot_docs()[:20] case "random": diff --git a/tests/test_prompt_manager_class.py b/tests/test_prompt_manager_class.py new file mode 100644 index 000000000..f552a9c31 --- /dev/null +++ b/tests/test_prompt_manager_class.py @@ -0,0 +1,463 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from unittest.mock import Mock + +import pytest + +from lighteval.tasks.prompt_manager import PromptManager +from lighteval.tasks.requests import Doc + + +class TestPromptManager: + """Test suite for the PromptManager class.""" + + def test_init_default_values(self): + """Test PromptManager initialization with default values.""" + pm = PromptManager() + assert pm.use_chat_template is False + assert pm.tokenizer is None + assert pm.system_prompt is None + + def test_init_with_chat_template(self): + """Test PromptManager initialization with chat template enabled.""" + tokenizer = Mock() + system_prompt = "You are a helpful assistant." + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer, system_prompt=system_prompt) + assert pm.use_chat_template is True + assert pm.tokenizer == tokenizer + assert pm.system_prompt == system_prompt + + def test_prepare_prompt_plain_text_basic(self): + """Test prepare_prompt with plain text format and basic document.""" + pm = PromptManager() + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt(doc) + assert result == "What is 2+2?" + + def test_prepare_prompt_plain_text_with_system_prompt(self): + """Test prepare_prompt with plain text format and system prompt.""" + pm = PromptManager(system_prompt="You are a math tutor.") + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt(doc) + assert result == "You are a math tutor.\n\nWhat is 2+2?" + + def test_prepare_prompt_plain_text_with_instruction(self): + """Test prepare_prompt with plain text format and instruction.""" + pm = PromptManager() + doc = Doc( + query="What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question:", + ) + + result = pm.prepare_prompt(doc) + assert result == "Please answer the following question:\n\nWhat is 2+2?" + + def test_prepare_prompt_plain_text_with_system_and_instruction(self): + """Test prepare_prompt with plain text format, system prompt and instruction.""" + pm = PromptManager(system_prompt="You are a math tutor.") + doc = Doc( + query="What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question:", + ) + + result = pm.prepare_prompt(doc) + assert result == "You are a math tutor.\n\nPlease answer the following question:\n\nWhat is 2+2?" + + def test_prepare_prompt_plain_text_with_fewshot(self): + """Test prepare_prompt with plain text format and few-shot examples.""" + pm = PromptManager() + + # Create few-shot sample + fewshot_doc = Doc(query="What is 1+1?", choices=["1", "2", "3"], gold_index=1) + + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + assert result == "What is 1+1? 2\n\nWhat is 2+2?" 
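# The plain-text tests above and below all exercise the same assembly rule: system prompt,
# then the instruction (stated once and stripped from individual queries), then each
# few-shot query with its gold choice appended, then the final query, all joined by blank
# lines. A minimal sketch of that rule under those assumptions (_sketch_plain_text_prompt
# is a hypothetical helper for illustration, not the actual PromptManager implementation,
# and it only handles an int gold_index):
def _sketch_plain_text_prompt(doc, system_prompt=None):
    instruction = doc.instruction or ""

    def strip_instruction(query):
        # Few-shot and final queries may repeat the instruction; keep it only once, up front.
        if instruction and query.startswith(instruction):
            return query[len(instruction):].lstrip()
        return query

    shots = [
        f"{strip_instruction(fs.query)} {fs.choices[fs.gold_index]}"
        for fs in getattr(doc, "fewshot_samples", [])
    ]
    header = [part for part in (system_prompt, instruction or None) if part]
    return "\n\n".join(header + shots + [strip_instruction(doc.query)])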
+ + def test_prepare_prompt_plain_text_with_fewshot_and_instruction(self): + """Test prepare_prompt with plain text format, few-shot examples and instruction.""" + pm = PromptManager() + + # Create few-shot sample with instruction + fewshot_doc = Doc( + query="Please answer the following question: What is 1+1?", + choices=["1", "2", "3"], + gold_index=1, + instruction="Please answer the following question:", + ) + + doc = Doc( + query="Please answer the following question: What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question:", + ) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + assert result == "Please answer the following question:\n\nWhat is 1+1? 2\n\nWhat is 2+2?" + + def test_prepare_prompt_chat_template_basic(self): + """Test prepare_prompt with chat template format and basic document.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = "<|user|>\nWhat is 2+2?<|assistant|>" + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer) + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt(doc) + assert result == "<|user|>\nWhat is 2+2?<|assistant|>" + tokenizer.apply_chat_template.assert_called_once() + + def test_prepare_prompt_chat_template_with_system_prompt(self): + """Test prepare_prompt with chat template format and system prompt.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = ( + "<|system|>\nYou are a math tutor.<|user|>\nWhat is 2+2?<|assistant|>" + ) + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer, system_prompt="You are a math tutor.") + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt(doc) + assert result == "<|system|>\nYou are a math tutor.<|user|>\nWhat is 2+2?<|assistant|>" + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 2 + assert messages[0]["role"] == "system" + assert messages[0]["content"] == "You are a math tutor." + assert messages[1]["role"] == "user" + assert messages[1]["content"] == "What is 2+2?" + + def test_prepare_prompt_chat_template_with_fewshot(self): + """Test prepare_prompt with chat template format and few-shot examples.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = ( + "<|user|>\nWhat is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + ) + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer) + + # Create few-shot sample + fewshot_doc = Doc(query="What is 1+1?", choices=["1", "2", "3"], gold_index=1) + + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + assert result == "<|user|>\nWhat is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 3 + assert messages[0]["role"] == "user" + assert messages[0]["content"] == "What is 1+1?" + assert messages[1]["role"] == "assistant" + assert messages[1]["content"] == "2" + assert messages[2]["role"] == "user" + assert messages[2]["content"] == "What is 2+2?" 
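# In the chat-template tests above, the rendered string comes entirely from the mocked
# apply_chat_template call; what the assertions pin down is the message list passed to it.
# A hedged sketch of that construction (_sketch_chat_messages is a hypothetical name, not
# the PromptManager API, and whether the instruction is folded into the system turn in the
# combined system-plus-instruction case is an assumption, since only the message count is
# asserted there):
def _sketch_chat_messages(doc, system_prompt=None):
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    for fs in getattr(doc, "fewshot_samples", []):
        # Each few-shot example becomes a user turn (query kept verbatim, instruction
        # included) followed by an assistant turn holding the gold choice.
        messages.append({"role": "user", "content": fs.query})
        messages.append({"role": "assistant", "content": fs.choices[fs.gold_index]})
    messages.append({"role": "user", "content": doc.query})
    return messages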
+ + def test_prepare_prompt_chat_template_with_fewshot_and_instruction(self): + """Test prepare_prompt with chat template format, few-shot examples and instruction.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = "<|user|>\nPlease answer the following question: What is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer) + + # Create few-shot sample with instruction + fewshot_doc = Doc( + query="Please answer the following question: What is 1+1?", + choices=["1", "2", "3"], + gold_index=1, + instruction="Please answer the following question: ", + ) + + doc = Doc( + query="Please answer the following question: What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question: ", + ) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + assert ( + result + == "<|user|>\nPlease answer the following question: What is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + ) + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 3 + assert messages[0]["role"] == "user" + assert messages[0]["content"] == "Please answer the following question: What is 1+1?" + assert messages[1]["role"] == "assistant" + assert messages[1]["content"] == "2" + assert messages[2]["role"] == "user" + assert messages[2]["content"] == "What is 2+2?" + + def test_prepare_prompt_chat_template_with_fewshot_and_instruction_and_system_prompt(self): + """Test prepare_prompt with chat template format, few-shot examples, instruction and system prompt.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = "<|system|>You are a math tutor.\nPlease answer the following question:<|user|>\nWhat is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer, system_prompt="You are a math tutor.") + + # Create few-shot sample with instruction + fewshot_doc = Doc( + query="Please answer the following question: What is 1+1?", + choices=["1", "2", "3"], + gold_index=1, + instruction="Please answer the following question:", + ) + + doc = Doc( + query="Please answer the following question: What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question:", + ) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + assert ( + result + == "<|system|>You are a math tutor.\nPlease answer the following question:<|user|>\nWhat is 1+1?<|assistant|>\n2<|user|>\nWhat is 2+2?<|assistant|>" + ) + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 4 + + def test_prepare_prompt_chat_template_no_tokenizer(self): + """Test prepare_prompt with chat template but no tokenizer raises error.""" + pm = PromptManager(use_chat_template=True, tokenizer=None) + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + with pytest.raises(AssertionError, match="Tokenizer must be set for chat template formatting."): + pm.prepare_prompt(doc) + + def test_prepare_prompt_api_basic(self): + """Test prepare_prompt_api with basic document.""" + pm = PromptManager() + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt_api(doc) + assert result == [{"role": "user", "content": "What is 2+2?"}] + + def 
test_prepare_prompt_api_with_system_prompt(self): + """Test prepare_prompt_api with system prompt.""" + pm = PromptManager(system_prompt="You are a math tutor.") + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + + result = pm.prepare_prompt_api(doc) + assert result == [ + {"role": "system", "content": "You are a math tutor."}, + {"role": "user", "content": "What is 2+2?"}, + ] + + def test_prepare_prompt_api_with_instruction(self): + """Test prepare_prompt_api with instruction.""" + pm = PromptManager() + doc = Doc( + query="What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question: ", + ) + + result = pm.prepare_prompt_api(doc) + + assert result == [ + {"role": "user", "content": "Please answer the following question: What is 2+2?"}, + ] + + def test_prepare_prompt_api_with_system_and_instruction(self): + """Test prepare_prompt_api with system prompt and instruction.""" + pm = PromptManager(system_prompt="You are a math tutor.") + doc = Doc( + query="What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction="Please answer the following question: ", + ) + + result = pm.prepare_prompt_api(doc) + assert result == [ + {"role": "system", "content": "You are a math tutor."}, + {"role": "user", "content": "Please answer the following question: What is 2+2?"}, + ] + + def test_prepare_prompt_multimodal_basic(self): + """Test prepare_prompt_multimodal with basic multimodal document.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = "<|user|>\nWhat is in this image?<|assistant|>" + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer) + + # Mock image + mock_image = Mock() + + doc = Doc( + query="What is in this image?", choices=["A cat", "A dog", "A bird"], gold_index=0, images=[mock_image] + ) + + result = pm.prepare_prompt_multimodal(doc) + assert result == "<|user|>\nWhat is in this image?<|assistant|>" + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 1 + assert messages[0]["role"] == "user" + assert len(messages[0]["content"]) == 2 + assert messages[0]["content"][0]["type"] == "text" + assert messages[0]["content"][0]["text"] == "What is in this image?" + assert messages[0]["content"][1]["type"] == "image" + assert messages[0]["content"][1]["image"] == mock_image + + def test_prepare_prompt_multimodal_with_system_prompt(self): + """Test prepare_prompt_multimodal with system prompt.""" + tokenizer = Mock() + tokenizer.apply_chat_template.return_value = ( + "<|system|>\nYou are a helpful assistant.<|user|>\nWhat is in this image?<|assistant|>" + ) + + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer, system_prompt="You are a helpful assistant.") + + mock_image = Mock() + doc = Doc( + query="What is in this image?", choices=["A cat", "A dog", "A bird"], gold_index=0, images=[mock_image] + ) + + result = pm.prepare_prompt_multimodal(doc) + assert result == "<|system|>\nYou are a helpful assistant.<|user|>\nWhat is in this image?<|assistant|>" + + # Verify the call arguments + call_args = tokenizer.apply_chat_template.call_args + messages = call_args[0][0] + assert len(messages) == 2 + assert messages[0]["role"] == "system" + assert messages[0]["content"][0]["type"] == "text" + assert messages[0]["content"][0]["text"] == "You are a helpful assistant." 
+ assert messages[1]["role"] == "user" + assert len(messages[1]["content"]) == 2 + + def test_prepare_prompt_multimodal_no_chat_template(self): + """Test prepare_prompt_multimodal without chat template raises error.""" + pm = PromptManager(use_chat_template=False) + mock_image = Mock() + doc = Doc( + query="What is in this image?", choices=["A cat", "A dog", "A bird"], gold_index=0, images=[mock_image] + ) + + with pytest.raises(ValueError, match="Multimodal prompts are only supported with chat template format."): + pm.prepare_prompt_multimodal(doc) + + def test_prepare_prompt_multimodal_no_tokenizer(self): + """Test prepare_prompt_multimodal without tokenizer raises error.""" + pm = PromptManager(use_chat_template=True, tokenizer=None) + mock_image = Mock() + doc = Doc( + query="What is in this image?", choices=["A cat", "A dog", "A bird"], gold_index=0, images=[mock_image] + ) + + with pytest.raises(ValueError, match="Multimodal prompts are only supported with chat template format."): + pm.prepare_prompt_multimodal(doc) + + def test_prepare_prompt_multimodal_no_images(self): + """Test prepare_prompt_multimodal without images raises error.""" + tokenizer = Mock() + pm = PromptManager(use_chat_template=True, tokenizer=tokenizer) + doc = Doc(query="What is in this image?", choices=["A cat", "A dog", "A bird"], gold_index=0, images=None) + + with pytest.raises(ValueError, match="Multimodal prompts require images to be provided in the document."): + pm.prepare_prompt_multimodal(doc) + + def test_extract_query_no_instruction(self): + """Test _extract_query with no instruction.""" + pm = PromptManager() + result = pm._extract_query("What is 2+2?", None) + assert result == "What is 2+2?" + + def test_extract_query_with_instruction(self): + """Test _extract_query with instruction but no previous shots.""" + pm = PromptManager() + result = pm._extract_query("Please answer: What is 2+2?", "Please answer: ") + assert result == "What is 2+2?" + + def test_prepare_prompt_with_multiple_fewshot_examples(self): + """Test prepare_prompt with multiple few-shot examples.""" + pm = PromptManager() + + # Create few-shot samples + fewshot_doc1 = Doc(query="What is 1+1?", choices=["1", "2", "3"], gold_index=1) + fewshot_doc2 = Doc(query="What is 3+3?", choices=["5", "6", "7"], gold_index=1) + + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + doc.fewshot_samples = [fewshot_doc1, fewshot_doc2] + + result = pm.prepare_prompt(doc) + assert result == "What is 1+1? 2\n\nWhat is 3+3? 6\n\nWhat is 2+2?" + + def test_prepare_prompt_with_empty_fewshot_samples(self): + """Test prepare_prompt with empty few-shot samples.""" + pm = PromptManager() + doc = Doc(query="What is 2+2?", choices=["3", "4", "5"], gold_index=1) + doc.fewshot_samples = [] + + result = pm.prepare_prompt(doc) + assert result == "What is 2+2?" + + def test_prepare_prompt_with_complex_instruction_removal(self): + """Test prepare_prompt with complex instruction removal in few-shot examples.""" + pm = PromptManager() + + # Create few-shot sample with complex instruction + instruction = "Please provide a detailed mathematical answer to the following question:" + fewshot_doc = Doc(query="What is 1+1?", choices=["1", "2", "3"], gold_index=1, instruction=instruction) + + doc = Doc( + query="What is 2+2?", + choices=["3", "4", "5"], + gold_index=1, + instruction=instruction, + ) + doc.fewshot_samples = [fewshot_doc] + + result = pm.prepare_prompt(doc) + expected = f"{instruction}\n\nWhat is 1+1? 2\n\nWhat is 2+2?" 
+ assert result == expected diff --git a/tests/test_unit_base_metrics.py b/tests/test_unit_base_metrics.py index 0570b7e90..65302d127 100644 --- a/tests/test_unit_base_metrics.py +++ b/tests/test_unit_base_metrics.py @@ -32,6 +32,7 @@ ) from lighteval.metrics.metrics_sample import ExactMatches from lighteval.metrics.normalizations import LogProbCharNorm, helm_normalizer +from lighteval.models.model_output import ModelResponse from lighteval.tasks.requests import Doc from lighteval.utils.language import Language @@ -191,87 +192,88 @@ def test_prefix_quasi_exact_match(self): assert res == 0 def test_prob(self): + doc = Doc(query="Test query", choices=["A", "B", "C"], gold_index=0, task_name="test") + # Simple case + model_response = ModelResponse(logprobs=np.log([0.7])) prob_metric = probability_metric() - result = prob_metric.sample_level_fn(logprobs=np.log([0.7]), target_tokens=None, reference_texts=None) + result = prob_metric.sample_level_fn(doc, model_response) assert result == pytest.approx(0.7) # Aggregation function test + model_response = ModelResponse(logprobs=np.log([0.7, 0.1])) prob_min_metric = probability_metric(aggregation_function=np.min) - result = prob_min_metric.sample_level_fn(logprobs=np.log([0.7, 0.1]), target_tokens=None, reference_texts=None) + result = prob_min_metric.sample_level_fn(doc, model_response) assert result == pytest.approx(0.1) def test_mc_probability_metric(self): + doc = Doc(query="Test query", choices=["A", "B", "C"], gold_index=0, task_name="test") + model_response = ModelResponse(logprobs=np.log([0.35, 0.1, 0.05])) + mc_prob_metric = normalized_multi_choice_prob_metric() + result = mc_prob_metric.sample_level_fn( - gold_ixs=[0], - choices_logprob=np.log([0.35, 0.1, 0.05]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["A", "B", "C"], gold_index=0, query=""), + doc, + model_response, ) assert result == pytest.approx(0.7) + doc = Doc(query="Test query", choices=["AA", "BB", "CCC"], gold_index=1, task_name="test") + model_response = ModelResponse(logprobs=np.log([0.1**2, 0.35**2, 0.05**3])) + prob_norm_metric = normalized_multi_choice_prob_metric(normalization=LogProbCharNorm()) result = prob_norm_metric.sample_level_fn( - gold_ixs=[1], - choices_logprob=np.log([0.1**2, 0.35**2, 0.05**3]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["AA", "BB", "CCC"], gold_index=1, query=""), + doc, + model_response, ) assert result == pytest.approx(0.7) def test_acc(self): # Test without normalization + doc = Doc(query="Test query", choices=["A", "B", "C", "D"], gold_index=0, task_name="test") + model_response = ModelResponse(logprobs=np.log([0.7, 0.2, 0.3, 0.4])) + acc_metric = loglikelihood_acc_metric() result = acc_metric.sample_level_fn( - gold_ixs=[0], - choices_logprob=np.log([0.7, 0.2, 0.3, 0.4]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["A", "B", "C", "D"], gold_index=0, query=""), + doc, + model_response, ) assert result == 1 # The highest logprob (3.0) is at index 3, which is not in gold_ixs # Test 0 acc + doc = Doc(query="Test query", choices=["A", "B", "C", "D"], gold_index=0, task_name="test") + model_response = ModelResponse(logprobs=np.log([0.1, 0.2, 0.3, 0.4])) result = acc_metric.sample_level_fn( - gold_ixs=[0], - choices_logprob=np.log([0.1, 0.2, 0.3, 0.4]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["A", "B", "C", "D"], gold_index=0, query=""), + doc, + model_response, ) assert result == 0 # Test 
with normalization + doc = Doc(query="Test query", choices=["ABCDE", "AB"], gold_index=0, task_name="test") + model_response = ModelResponse(logprobs=np.log([0.5, 0.6])) acc_norm_metric = loglikelihood_acc_metric(normalization=LogProbCharNorm()) result_norm = acc_norm_metric.sample_level_fn( - gold_ixs=[0], - choices_logprob=np.log([0.5, 0.6]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["ABCDE", "AB"], gold_index=0, query=""), + doc, + model_response, ) assert result_norm == 1 # After normalization, "ABCDE" should have the highest score # Test with multiple correct solutions + doc = Doc(query="Test query", choices=["A", "B", "C", "D"], gold_index=[1, 3], task_name="test") + model_response = ModelResponse(logprobs=np.log([0.5, 0.6, 0.7, 0.8])) result_multi = acc_metric.sample_level_fn( - gold_ixs=[1, 3], - choices_logprob=np.log([0.5, 0.6, 0.7, 0.8]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["A", "B", "C", "D"], gold_index=[1, 3], query=""), + doc, + model_response, ) assert result_multi == 1 # Test when the highest logprob is not in gold_ixs + doc = Doc(query="Test query", choices=["A", "B", "C", "D"], gold_index=[1, 2], task_name="test") + model_response = ModelResponse(logprobs=[0.5, 0.6, 0.7, 0.8]) result_incorrect = acc_metric.sample_level_fn( - gold_ixs=[1, 2], - choices_logprob=np.log([0.5, 0.6, 0.7, 0.8]), - unconditioned_logprob=None, - choices_tokens=None, - formatted_doc=Doc(choices=["A", "B", "C", "D"], gold_index=[1, 3], query=""), + doc, + model_response, ) assert result_incorrect == 0 @@ -280,18 +282,22 @@ def test_f1_dynamic_metric(self): Tests that normalization works correctly. We don't test the behavior of the F1_score class as it should be already tested. """ + doc = Doc(query="Test query", choices=["hello world"], gold_index=[0], task_name="test") + model_response = ModelResponse(text=["hello, the world"]) + # Normalization test f1_metric = multilingual_quasi_f1_score_metric(language=Language.ENGLISH) result = f1_metric.sample_level_fn( - golds=["hello world"], - predictions=["hello, the world"], + doc, + model_response, ) assert result == 1 + model_response = ModelResponse(text=["hello, the world how"]) f1_metric = multilingual_quasi_f1_score_metric(language=Language.ENGLISH, aggregation_function=np.min) result = f1_metric.sample_level_fn( - golds=["hello world"], - predictions=["hello, the world how"], + doc, + model_response, ) # 2 * (precision * recall) / (precision + recall) = 2 * (1 * 2/3) / (1 + 2/3) = 0.8 assert result == 0.8 @@ -300,19 +306,22 @@ def test_exact_match_dynamic_metric(self): """ Tests that normalization works correctly. We don't test the behavior of the ExactMatch class as it should be already tested. 
""" + doc = Doc(query="Test query", choices=["hello world"], gold_index=[0], task_name="test") + model_response = ModelResponse(text=["hello, the world"]) # Normalization test em_metric = multilingual_quasi_exact_match_metric(language=Language.ENGLISH, match_type="full") result = em_metric.sample_level_fn( - golds=["hello world"], - predictions=["hello, the world"], + doc, + model_response, ) assert result == 1 + model_response = ModelResponse(text=["hello, the world how"]) em_metric = multilingual_quasi_exact_match_metric(language=Language.ENGLISH, match_type="full") result = em_metric.sample_level_fn( - golds=["hello world"], - predictions=["hello, the world how"], + doc, + model_response, ) assert result == 0 diff --git a/tests/test_unit_harness_metrics.py b/tests/test_unit_harness_metrics.py index 8918cd7e0..4cc2853ae 100644 --- a/tests/test_unit_harness_metrics.py +++ b/tests/test_unit_harness_metrics.py @@ -25,6 +25,7 @@ import pytest +from lighteval.metrics import apply_metric from lighteval.metrics.metrics import Metrics from lighteval.metrics.sample_preparator import ( GenerativeCorpusMetricInput, @@ -32,7 +33,6 @@ PerplexityCorpusMetricInput, ) from lighteval.models.model_output import ModelResponse -from lighteval.tasks.lighteval_task import LightevalTask from lighteval.tasks.requests import Doc from lighteval.utils.utils import as_list @@ -64,30 +64,47 @@ def pytest_generate_tests(metafunc: pytest.Metafunc): metafunc.parametrize("prompt_inputs", parameters, scope="session") -def test_model_prediction(prompt_inputs: tuple[str, str, list]): +def test_model_prediction(prompt_inputs: tuple[str, str, list]): # noqa: C901 """Evaluates a model on a full task - is parametrized using pytest_generate_test""" metric, task_name, examples = prompt_inputs metric_name = metric metric = Metrics[metric].value - print(metric_name, task_name) + for example in examples: - formatted_doc = { + doc = { k: v for k, v in example.items() if k in ["full_prompt", "choices", "gold_index", "original_query", "specific"] } - print(formatted_doc) - formatted_doc["query"] = formatted_doc.pop("full_prompt") - formatted_doc = Doc(**formatted_doc) - error_msg = f"Metric {metric_name} failed on input {formatted_doc} from task {task_name}.\n" - - results = [ModelResponse(result=i, input_tokens=[], generated_tokens=[]) for i in example["predictions"]] - # todo: update to create list of ModelResults in results - metric_result = apply_metric( - sample_ids=["0"], metric=metric, responses=[results], formatted_docs=[formatted_doc] - )[0] - assert metric_result is not None, error_msg - metric_result = {k: list(v) if isinstance(v, tuple) else v for k, v in metric_result.items()} + doc["query"] = doc.pop("full_prompt") + doc = Doc(**doc) + error_msg = f"Metric {metric_name} failed on input {doc} from task {task_name}.\n" + + match example["predictions"]: + case [first_element, *_] if isinstance(first_element, str): + # If the predictions are a list of strings, we assume it's a generative task + responses = [ModelResponse(text=example["predictions"], output_tokens=[[]], input_tokens=[])] + case [first_element, *_] if isinstance(first_element, float): + # If the predictions are a list of floats, we assume it's a logprob task + responses = [ModelResponse(logprobs=example["predictions"], output_tokens=[[]], input_tokens=[])] + case [first_element, *_] if len(first_element) == 2 and isinstance(first_element[1], bool): + # If the predictions are a list of lists with two elements, we assume it's a loglikelihood task with argmax + 
responses = [ + ModelResponse( + logprobs=[pred[0] for pred in example["predictions"]], + argmax_logits_eq_gold=[pred[1] for pred in example["predictions"]], + output_tokens=[[]], + input_tokens=[], + ) + ] + case _: + # If the predictions are not a list of strings or floats, we assume it's a custom task + responses = [ModelResponse(logprobs=example["predictions"][0], input_tokens=[])] + + results = apply_metric(responses=responses, docs=[doc], metrics=[metric])[0] + assert responses is not None, error_msg + + metric_result = {k: list(v) if isinstance(v, tuple) else v for k, v in results.items()} metric_reference = {k: v for k, v in example.items() if k in POSSIBLE_METRICS} error_msg += f"Prediction: {results}\n" @@ -110,16 +127,15 @@ def test_model_prediction(prompt_inputs: tuple[str, str, list]): for res, ref in zip(cur_result_list, cur_ref_list): try: assert res == pytest.approx(ref, rel=1e-8), error_msg - except Exception as e: - assert False, error_msg + "\n" + str(e) + except Exception: + assert False, ( + key + "\n" + str(cur_result_list) + "\n" + str(cur_ref_list) + "\n" + task_name + "\n" + ) else: try: assert cur_result_list == pytest.approx(cur_ref_list, rel=1e-8), error_msg - except Exception as e: - assert False, error_msg + "\n" + str(e) - - -def apply_metric(sample_ids, metric, responses, formatted_docs: list[Doc]): - method = LightevalTask._get_metric_method_from_category(metric.category) - cur_outputs = method(sample_ids=sample_ids, metrics=[metric], responses=responses, formatted_docs=formatted_docs) - return cur_outputs + except Exception: + # assert False, error_msg + "\n" + str(e) + assert False, ( + key + "\n" + str(cur_result_list) + "\n" + str(cur_ref_list) + "\n" + task_name + "\n" + ) diff --git a/tests/test_unit_reorder.py b/tests/test_unit_reorder.py index 6212bb646..1e01a1095 100644 --- a/tests/test_unit_reorder.py +++ b/tests/test_unit_reorder.py @@ -20,59 +20,37 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-from transformers import AutoTokenizer - from lighteval.data import GenerativeTaskDataset -from lighteval.metrics.utils.metric_utils import MetricCategory -from lighteval.tasks.requests import GreedyUntilRequest +from lighteval.metrics.utils.metric_utils import SamplingMethod +from lighteval.tasks.requests import Doc # test data that will need to be sorted by length of the string TEST_DATA = [ - GreedyUntilRequest( - task_name="test", - sample_index=0, - request_index=0, - context="1 The quick brown fox jumps over the lazy dog", - stop_sequence=[":", "stop"], - generation_size=10, - metric_categories=[MetricCategory.GENERATIVE], + Doc( + query="1 The quick brown fox jumps over the lazy dog", + choices=["A", "B", "C"], + gold_index=0, ), - GreedyUntilRequest( - task_name="test", - sample_index=2, - request_index=0, - context="2 The quick brown fox jumps over the lazy dog njsa", - stop_sequence=[":", "stop"], - generation_size=10, - metric_categories=[MetricCategory.GENERATIVE], + Doc( + query="2 The quick brown fox jumps over the lazy dog njsa", + choices=["A", "B", "C"], + gold_index=0, ), - GreedyUntilRequest( - task_name="test", - sample_index=5, - request_index=0, - context="Some text", - stop_sequence=[":", "stop"], - generation_size=10, - metric_categories=[MetricCategory.GENERATIVE], + Doc( + query="Some text", + choices=["A", "B", "C"], + gold_index=0, ), - GreedyUntilRequest( - task_name="test", - sample_index=21, - request_index=0, - context="some more text", - stop_sequence=[":", "stop"], - generation_size=10, - metric_categories=[MetricCategory.GENERATIVE], + Doc( + query="some more text", + choices=["A", "B", "C"], + gold_index=0, ), - GreedyUntilRequest( - task_name="test", - sample_index=1, - request_index=0, - context="not sure what to write here", - stop_sequence=[":", "stop"], - generation_size=10, - metric_categories=[MetricCategory.GENERATIVE], + Doc( + query="not sure what to write here", + choices=["A", "B", "C"], + gold_index=0, ), ] @@ -81,10 +59,12 @@ class TestReorderGenerativeTaskDataset: def test_reorder_dataset(self): - tokenizer = AutoTokenizer.from_pretrained("gpt2") data = TEST_DATA.copy() - for request in data: - request.tokenized_context = tokenizer.encode(request.context) + for d in data: + d.task_name = "test" + d.sampling_methods = [SamplingMethod.GENERATIVE] + d.generation_size = 10 + d.stop_sequences = ["stop", ":", "end"] dataset = GenerativeTaskDataset(requests=data, num_dataset_splits=DATASET_SPLITS) @@ -92,8 +72,8 @@ def test_reorder_dataset(self): original_data = dataset.get_original_order(sorted_data) for i in range(len(sorted_data) - 1): - assert len(sorted_data[i].context) >= len(sorted_data[i + 1].context), ( - f"dataset[{i}][0] = {sorted_data[i].context} is shorter than dataset[{i + 1}][0] = {sorted_data[i + 1].context}" + assert len(sorted_data[i].query) >= len(sorted_data[i + 1].query), ( + f"dataset[{i}][0] = {sorted_data[i].query} is shorter than dataset[{i + 1}][0] = {sorted_data[i + 1].query}" ) assert len(sorted_data) == len(original_data), ( diff --git a/tests/utils.py b/tests/utils.py index 47ffad914..383f49f3b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -29,20 +29,11 @@ from lighteval.logging.evaluation_tracker import EvaluationTracker from lighteval.models.abstract_model import LightevalModel, ModelInfo -from lighteval.models.model_output import ( - GenerativeResponse, - LoglikelihoodResponse, - LoglikelihoodSingleTokenResponse, -) +from lighteval.models.model_output import ModelResponse from lighteval.pipeline import 
ParallelismManager, Pipeline, PipelineParameters from lighteval.tasks.lighteval_task import LightevalTask from lighteval.tasks.registry import Registry -from lighteval.tasks.requests import ( - GreedyUntilRequest, - LoglikelihoodRequest, - LoglikelihoodRollingRequest, - LoglikelihoodSingleTokenRequest, -) +from lighteval.tasks.requests import Doc from lighteval.utils.imports import is_accelerate_available @@ -51,16 +42,14 @@ class FakeModel(LightevalModel): def __init__( self, - greedy_until_responses: list[GenerativeResponse] = [], - loglikelihood_responses: list[LoglikelihoodResponse] = [], - loglikelihood_rolling_responses: list[LoglikelihoodResponse] = [], - loglikelihood_single_token_responses: list[LoglikelihoodSingleTokenResponse] = [], + greedy_until_responses: list[ModelResponse] = [], + loglikelihood_responses: list[ModelResponse] = [], + loglikelihood_rolling_responses: list[ModelResponse] = [], ): self._tokenizer = None self.greedy_until_responses = greedy_until_responses self.loglikelihood_responses = loglikelihood_responses self.loglikelihood_rolling_responses = loglikelihood_rolling_responses - self.loglikelihood_single_token_responses = loglikelihood_single_token_responses @property def tokenizer(self): @@ -80,64 +69,54 @@ def max_length(self) -> int: def model_info(self): return ModelInfo(model_name="fake_model") - def greedy_until( - self, requests: list[GreedyUntilRequest], override_bs: Optional[int] = None - ) -> list[GenerativeResponse]: - ret_resp, self.greedy_until_resp = ( - self.greedy_until_responses[: len(requests)], - self.greedy_until_responses[len(requests) :], + def greedy_until(self, docs: list[Doc]) -> list[ModelResponse]: + ret_resp, self.greedy_until_responses = ( + self.greedy_until_responses[: len(docs)], + self.greedy_until_responses[len(docs) :], ) return ret_resp - def loglikelihood( - self, requests: list[LoglikelihoodRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: + def loglikelihood(self, docs: list[Doc]) -> list[ModelResponse]: ret_resp, self.loglikelihood_responses = ( - self.loglikelihood_responses[: len(requests)], - self.loglikelihood_responses[len(requests) :], + self.loglikelihood_responses[: len(docs)], + self.loglikelihood_responses[len(docs) :], ) return ret_resp - def loglikelihood_rolling( - self, requests: list[LoglikelihoodRollingRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodResponse]: + def loglikelihood_rolling(self, docs: list[Doc]) -> list[ModelResponse]: ret_resp, self.loglikelihood_rolling_responses = ( - self.loglikelihood_rolling_responses[: len(requests)], - self.loglikelihood_rolling_responses[len(requests) :], - ) - return ret_resp - - def loglikelihood_single_token( - self, requests: list[LoglikelihoodSingleTokenRequest], override_bs: Optional[int] = None - ) -> list[LoglikelihoodSingleTokenResponse]: - ret_resp, self.loglikelihood_single_token_responses = ( - self.loglikelihood_single_token_responses[: len(requests)], - self.loglikelihood_single_token_responses[len(requests) :], + self.loglikelihood_rolling_responses[: len(docs)], + self.loglikelihood_rolling_responses[len(docs) :], ) return ret_resp def fake_evaluate_task( - task: LightevalTask, lm: FakeModel, max_samples: int = 1, n_fewshot: int = 0, n_fewshot_seeds: int = 1 + lighteval_task: LightevalTask, lm: FakeModel, max_samples: int = 1, n_fewshot: int = 0, n_fewshot_seeds: int = 1 ): # Mock the Registry.get_task_dict method - task_name = f"{task.suite[0]}|{task.name}" + task_name = 
f"{lighteval_task.suite[0]}|{lighteval_task.name}" - task_dict = {task_name: task} + task_dict = {task_name: lighteval_task} evaluation_tracker = EvaluationTracker(output_dir="outputs") evaluation_tracker.task_config_logger.log(task_dict) # Create a mock Registry class class FakeRegistry(Registry): - def __init__( - self, cache_dir: Optional[str] = None, custom_tasks: Optional[Union[str, Path, ModuleType]] = None - ): - super().__init__(cache_dir=cache_dir, custom_tasks=custom_tasks) + def __init__(self, custom_tasks: Optional[Union[str, Path, ModuleType]] = None): + super().__init__(custom_tasks=custom_tasks) def get_task_dict(self, task_names: list[str]): return task_dict + def get_tasks_configs(self, task: str): + config = lighteval_task.config + config.num_fewshots = n_fewshot + config.truncate_fewshots = False + config.full_name = f"{task_name}|{config.num_fewshots}" + return [config] + # This is due to logger complaining we have no initialised the accelerator # It's hard to mock as it's global singleton if is_accelerate_available(): @@ -147,10 +126,9 @@ def get_task_dict(self, task_names: list[str]): # This is a bit hacky, because there is no way to run end to end, with # dynamic task :(, so we just mock the registry - task_run_string = f"{task_name}|{n_fewshot}|{n_fewshot_seeds}" with patch("lighteval.pipeline.Registry", FakeRegistry): pipeline = Pipeline( - tasks=task_run_string, + tasks=task_name, pipeline_parameters=PipelineParameters(max_samples=max_samples, launcher_type=ParallelismManager.NONE), evaluation_tracker=evaluation_tracker, model=lm, @@ -158,4 +136,4 @@ def get_task_dict(self, task_names: list[str]): ) pipeline.evaluate() - return evaluation_tracker.metrics_logger.metrics_values[f"{task_name}|{n_fewshot}"] + return pipeline.get_results()
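# A hedged usage sketch for the reworked helpers above. FakeModel, ModelResponse and
# fake_evaluate_task are the names introduced or kept by this diff; my_generative_task,
# the exact ModelResponse fields a generative metric reads, and the shape of the value
# returned by pipeline.get_results() are assumptions made for illustration only:
def _sketch_fake_generative_run(my_generative_task):
    # One canned generation per expected greedy_until call on the fake model.
    lm = FakeModel(
        greedy_until_responses=[ModelResponse(text=["4"], input_tokens=[], output_tokens=[[]])]
    )
    # Runs the real Pipeline against the fake model and returns its aggregated results.
    return fake_evaluate_task(my_generative_task, lm, max_samples=1)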