
Commit 2233a77

Added MLMM tasks
1 parent b05c53d commit 2233a77

File tree

6 files changed: 539 additions & 0 deletions

lm_eval/tasks/__init__.py

Lines changed: 11 additions & 0 deletions
@@ -64,6 +64,12 @@
 from .aam.all_tasks_registry import TASK_REGISTRY as AAM_TASK_REGISTRY
 from .opengptx.all_tasks_registry import TASK_REGISTRY as OGX_TASK_REGISTRY

+from .mlmm import multilingual_arc
+from .mlmm import multilingual_hellaswag
+from .mlmm import multilingual_mmlu
+from .mlmm import multilingual_truthfulqa
+
+
 ########################################
 # Translation tasks
 ########################################
@@ -328,6 +334,11 @@
     **tmp_new_pawsx.construct_tasks(),
     **tmp_new_xnli.construct_tasks(),
     **mgsm.construct_tasks(),
+    # Multilingual OpenLLM Evaluation
+    **multilingual_arc.create_all_tasks(),
+    **multilingual_mmlu.create_all_tasks(),
+    **multilingual_truthfulqa.create_all_tasks(),
+    **multilingual_hellaswag.create_all_tasks(),
 }

 # append the luminous (eg. Aleph-Alpha implemented) tasks to the whole registry
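
With these registry entries in place, the new tasks should be addressable by name like any other harness task. A minimal sketch, not part of the commit, assuming the harness's existing get_task_dict helper and locally downloaded MLMM data:

from lm_eval import tasks

# Resolve two of the newly registered MLMM tasks by name (example names taken
# from the mlmm_arc_* / mlmm_mmlu_* families defined in the modules below).
task_dict = tasks.get_task_dict(["mlmm_arc_vi", "mlmm_mmlu_de"])
for name, task in task_dict.items():
    print(name, type(task).__name__)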

lm_eval/tasks/mlmm/__init__.py

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
"""
Tasks from "Multilingual Large Language Models Evaluation Benchmark"

Source: https://github.com/nlp-uoregon/mlmm-evaluation

This repo contains benchmark datasets and evaluation scripts for multilingual Large Language Models (LLMs). The datasets can be used to evaluate models across 26 different languages and encompass three distinct tasks: ARC, HellaSwag, and MMLU. This is released as a part of our [Okapi framework](https://github.com/nlp-uoregon/Okapi) for multilingual instruction-tuned LLMs with reinforcement learning from human feedback.

- [**ARC**](https://allenai.org/data/arc): A dataset with 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in advanced question-answering.
- [**HellaSwag**](https://allenai.org/data/hellaswag): HellaSwag is a dataset for studying grounded commonsense inference. It consists of 70k multiple-choice questions about grounded situations: each question comes from one of two domains, *activitynet* or *wikihow*, with four answer choices about what might happen next in the scene. The correct answer is the (real) sentence for the next event; the three incorrect answers are adversarially generated and human verified, so as to fool machines but not humans.
- [**MMLU**](https://arxiv.org/pdf/2009.03300.pdf): This dataset contains multiple-choice questions derived from diverse fields of knowledge. The test covers subjects in the humanities, social sciences, hard sciences, and other essential areas of learning.

Currently, our datasets support 26 languages: Russian, German, Chinese, French, Spanish, Italian, Dutch, Vietnamese, Indonesian, Arabic, Hungarian, Romanian, Danish, Slovak, Ukrainian, Catalan, Serbian, Croatian, Hindi, Bengali, Tamil, Nepali, Malayalam, Marathi, Telugu, and Kannada.
"""
import os


def get_mlmm_dataset_path(dataset_path: str) -> str:
    base_path = os.environ.get("MLMM_DATASET_BASE_PATH", None)

    if base_path:
        dataset_path = os.path.join(base_path, dataset_path)

    if not os.path.exists(dataset_path):
        raise FileNotFoundError(
            f"Dataset path does not exist ({dataset_path}). If you already downloaded the data, try setting the MLMM_DATASET_BASE_PATH environment variable. To download the data, follow the instructions provided here: https://github.com/nlp-uoregon/mlmm-evaluation/tree/main#basic-usage"
        )

    return dataset_path
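
The helper resolves relative dataset paths against MLMM_DATASET_BASE_PATH and fails fast when the data is missing. A hedged usage sketch, not part of the commit; the base path below is hypothetical:

import os

# Hypothetical location of a local clone of nlp-uoregon/mlmm-evaluation.
os.environ["MLMM_DATASET_BASE_PATH"] = "/data/mlmm-evaluation"

from lm_eval.tasks.mlmm import get_mlmm_dataset_path

# Joins the base path and the relative path, then checks that it exists;
# raises FileNotFoundError with download instructions otherwise.
print(get_mlmm_dataset_path("datasets/m_arc"))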

lm_eval/tasks/mlmm/multilingual_arc.py

Lines changed: 95 additions & 0 deletions
@@ -0,0 +1,95 @@
"""
Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
https://arxiv.org/pdf/1803.05457.pdf

The ARC dataset consists of 7,787 science exam questions drawn from a variety
of sources, including science questions provided under license by a research
partner affiliated with AI2. These are text-only, English language exam questions
that span several grade levels as indicated in the files. Each question has a
multiple choice structure (typically 4 answer options). The questions are sorted
into a Challenge Set of 2,590 "hard" questions (those that both a retrieval and
a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.

Homepage: https://allenai.org/data/arc
"""
from lm_eval.base import MultipleChoiceTask
from . import get_mlmm_dataset_path

_CITATION = """
@article{Clark2018ThinkYH,
    title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
    author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
    journal={ArXiv},
    year={2018},
    volume={abs/1803.05457}
}
"""

LANGS = "ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh".split(
    ","
)


def create_all_tasks():
    """Creates a dictionary of tasks, one per supported language
    :return: {task_name: task}
        e.g. {mlmm_arc_vi: Task, mlmm_arc_bn: Task}
    """
    return {f"mlmm_arc_{lang}": create_task(lang) for lang in LANGS}


def create_task(lang):
    class ATest(MultilingualARC):
        def __init__(self):
            super().__init__(lang)

    return ATest


class MultilingualARC(MultipleChoiceTask):
    def __init__(self, lang, **kwargs):
        self.VERSION = 0
        self.lang = lang
        self.DATASET_NAME = f"arc_{lang}"
        self.DATASET_PATH = get_mlmm_dataset_path("datasets/m_arc")
        self.NUM_FEW_SHOT = 25
        super().__init__(**kwargs)

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def _process_doc(self, doc):
        # NOTE: "answerKey" is a letter (A-E); map it to the index of the
        # corresponding entry in "choices".
        out_doc = {
            "id": doc["id"],
            "query": "Question: " + doc["question"] + "\nAnswer:",
            "choices": doc["choices"],
            "gold": ["A", "B", "C", "D", "E"].index(doc["answerKey"]),
        }
        return out_doc

    def doc_to_text(self, doc):
        return doc["query"]

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc["query"]
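
For each language code in LANGS, create_all_tasks() builds a dedicated subclass; instantiating one triggers the dataset lookup. A short sketch, not part of the commit, assuming the MLMM data has been downloaded as described in mlmm/__init__.py:

from lm_eval.tasks.mlmm import multilingual_arc

arc_tasks = multilingual_arc.create_all_tasks()
print(sorted(arc_tasks)[:3])      # ['mlmm_arc_ar', 'mlmm_arc_bn', 'mlmm_arc_ca']

# Instantiation resolves the m_arc dataset path, so it requires the downloaded data.
vi_task = arc_tasks["mlmm_arc_vi"]()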

lm_eval/tasks/mlmm/multilingual_hellaswag.py

Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
"""
HellaSwag: Can a Machine Really Finish Your Sentence?
https://arxiv.org/pdf/1905.07830.pdf

HellaSwag is a commonsense inference challenge dataset. Though its questions are
trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). This is
achieved via Adversarial Filtering (AF), a data collection paradigm wherein a
series of discriminators iteratively select an adversarial set of machine-generated
wrong answers. AF proves to be surprisingly robust. The key insight is to scale up
the length and complexity of the dataset examples towards a critical 'Goldilocks'
zone wherein generated text is ridiculous to humans, yet often misclassified by
state-of-the-art models.

Homepage: https://rowanzellers.com/hellaswag/
"""
import re
from lm_eval.base import MultipleChoiceTask
from . import get_mlmm_dataset_path

_CITATION = """
@inproceedings{zellers2019hellaswag,
    title={HellaSwag: Can a Machine Really Finish Your Sentence?},
    author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin},
    booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
    year={2019}
}
"""

LANGS = "ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh".split(
    ","
)


def create_all_tasks():
    """Creates a dictionary of tasks, one per supported language
    :return: {task_name: task}
        e.g. {mlmm_hellaswag_vi: Task, mlmm_hellaswag_bn: Task}
    """
    return {f"mlmm_hellaswag_{lang}": create_task(lang) for lang in LANGS}


def create_task(lang):
    class ATest(HellaSwag):
        def __init__(self):
            super().__init__(lang)

    return ATest


class HellaSwag(MultipleChoiceTask):
    def __init__(self, lang, **kwargs):
        self.VERSION = 1
        self.lang = lang
        self.DATASET_NAME = f"hellaswag_{lang}"
        self.DATASET_PATH = get_mlmm_dataset_path("datasets/m_hellaswag")
        self.NUM_FEW_SHOT = 0
        super().__init__(**kwargs)

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(map(self._process_doc, self.dataset["train"]))
        return self._training_docs

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def _process_doc(self, doc):
        ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        out_doc = {
            "query": self.preprocess(doc["activity_label"] + ": " + ctx),
            "choices": [self.preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    @classmethod
    def preprocess(cls, text):
        text = text.strip()
        # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
        text = text.replace(" [title]", ". ")
        text = re.sub("\\[.*?\\]", "", text)
        text = text.replace("  ", " ")  # collapse double spaces left by tag removal
        return text

    def doc_to_text(self, doc):
        return doc["query"]

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc["query"]
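
Because preprocess is a classmethod that only manipulates strings, it can be exercised without any dataset on disk. An illustrative sketch, not part of the commit, on a made-up WikiHow-style string (module path assumed from the imports in lm_eval/tasks/__init__.py):

from lm_eval.tasks.mlmm.multilingual_hellaswag import HellaSwag

raw = "Baking bread: Knead the dough. [title] Let it rise [step] for an hour."
# Bracketed tags are stripped and double spaces collapsed.
print(HellaSwag.preprocess(raw))
# -> "Baking bread: Knead the dough.. Let it rise for an hour."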

lm_eval/tasks/mlmm/multilingual_mmlu.py

Lines changed: 116 additions & 0 deletions
@@ -0,0 +1,116 @@
"""
Measuring Massive Multitask Language Understanding
https://arxiv.org/pdf/2009.03300.pdf

The Hendrycks Test is a benchmark that measures a text model's multitask accuracy.
The test covers 57 tasks including elementary mathematics, US history, computer
science, law, and more. To attain high accuracy on this test, models must possess
extensive world knowledge and problem solving ability. By comprehensively evaluating
the breadth and depth of a model's academic and professional understanding,
the Hendrycks Test can be used to analyze models across many tasks and to identify
important shortcomings.

Homepage: https://github.com/hendrycks/test
"""
from lm_eval.base import MultipleChoiceTask
from . import get_mlmm_dataset_path

_CITATION = """
@article{hendryckstest2021,
    title={Measuring Massive Multitask Language Understanding},
    author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
    journal={Proceedings of the International Conference on Learning Representations (ICLR)},
    year={2021}
}
"""
LANGS = "ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh".split(
    ","
)


def create_all_tasks():
    """Creates a dictionary of tasks, one per supported language
    :return: {task_name: task}
        e.g. {mlmm_mmlu_vi: Task, mlmm_mmlu_bn: Task}
    """
    return {f"mlmm_mmlu_{lang}": create_task(lang) for lang in LANGS}


def create_task(lang):
    class HendrycksTest(GeneralHendrycksTest):
        def __init__(self):
            super().__init__(lang)

    return HendrycksTest


class GeneralHendrycksTest(MultipleChoiceTask):
    VERSION = 0
    NUM_FEW_SHOT = 25
    DATASET_NAME = None

    def __init__(self, lang):
        self.DATASET_NAME = f"mmlu_{lang}"
        self.DATASET_PATH = get_mlmm_dataset_path("datasets/m_mmlu")

        super().__init__()

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return True

    def validation_docs(self):
        return map(self._process_doc, self.dataset["validation"])

    def test_docs(self):
        return map(self._process_doc, self.dataset["test"])

    def _process_doc(self, doc):
        def format_example(doc, keys):
            """
            Question: <prompt>
            Choices:
            A. <choice1>
            B. <choice2>
            C. <choice3>
            D. <choice4>
            Answer:
            """
            prompt = "Question: " + doc["question"] + "\nChoices:\n"
            prompt += "".join(
                [f"{key}. {choice}\n" for key, choice in zip(keys, doc["choices"])]
            )
            prompt += "Answer:"
            return prompt

        keys = ["A", "B", "C", "D"]
        return {
            "query": format_example(doc, keys),
            "choices": doc["choices"],
            "gold": keys.index(doc["answer"])
            if isinstance(doc["answer"], str)
            else doc["answer"],
        }

    def fewshot_examples(self, k, rnd):
        # fewshot_examples is not just sampling from train_docs because dev is
        # in the same distribution as val/test but auxiliary_train isn't

        if self._fewshot_docs is None:
            self._fewshot_docs = list(map(self._process_doc, self.dataset["dev"]))

        return rnd.sample(list(self._fewshot_docs), k)

    def doc_to_text(self, doc):
        return doc["query"]

    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc["query"]
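
For clarity, a standalone illustration of the prompt string that _process_doc assembles; the record below is made up, while real records come from the m_mmlu splits:

doc = {
    "question": "What is the capital of France?",
    "choices": ["Berlin", "Madrid", "Paris", "Rome"],
    "answer": "C",  # may also arrive as an integer index
}
keys = ["A", "B", "C", "D"]

query = "Question: " + doc["question"] + "\nChoices:\n"
query += "".join(f"{k}. {c}\n" for k, c in zip(keys, doc["choices"]))
query += "Answer:"
gold = keys.index(doc["answer"]) if isinstance(doc["answer"], str) else doc["answer"]

print(query)  # Question / Choices A-D / Answer: block, as in the docstring above
print(gold)   # 2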
