11 changes: 11 additions & 0 deletions lm_eval/tasks/__init__.py
@@ -64,6 +64,12 @@
from .aam.all_tasks_registry import TASK_REGISTRY as AAM_TASK_REGISTRY
from .opengptx.all_tasks_registry import TASK_REGISTRY as OGX_TASK_REGISTRY

from .mlmm import multilingual_arc
from .mlmm import multilingual_hellaswag
from .mlmm import multilingual_mmlu
from .mlmm import multilingual_truthfulqa


########################################
# Translation tasks
########################################
@@ -328,6 +334,11 @@
**tmp_new_pawsx.construct_tasks(),
**tmp_new_xnli.construct_tasks(),
**mgsm.construct_tasks(),
# Multilingual OpenLLM Evaluation
**multilingual_arc.create_all_tasks(),
**multilingual_mmlu.create_all_tasks(),
**multilingual_truthfulqa.create_all_tasks(),
**multilingual_hellaswag.create_all_tasks(),
}

# append the luminous (eg. Aleph-Alpha implemented) tasks to the whole registry
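With these entries merged into the registry, the new benchmarks become addressable by name like any other harness task. A minimal sketch of looking one up directly through `TASK_REGISTRY` (task names follow the `mlmm_<benchmark>_<lang>` pattern produced by `create_all_tasks()`; instantiating a task downloads the corresponding dataset):

from lm_eval import tasks

# e.g. the Vietnamese ARC task registered above
task = tasks.TASK_REGISTRY["mlmm_arc_vi"]()

# Render one evaluation document the way the harness would prompt it.
doc = next(iter(task.validation_docs()))
print(task.doc_to_text(doc))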
14 changes: 14 additions & 0 deletions lm_eval/tasks/mlmm/__init__.py
@@ -0,0 +1,14 @@
"""
Tasks from "Multilingual Large Language Models Evaluation Benchmark"

Source: https://github.com/nlp-uoregon/mlmm-evaluation

This repository contains benchmark datasets and evaluation scripts for multilingual large language models (LLMs). The datasets can be used to evaluate models across 26 different languages and cover three distinct tasks: ARC, HellaSwag, and MMLU. They are released as part of our [Okapi framework](https://github.com/nlp-uoregon/Okapi) for multilingual instruction-tuned LLMs with reinforcement learning from human feedback.

- [**ARC**](https://allenai.org/data/arc): A dataset with 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in advanced question-answering.
- [**HellaSwag**](https://allenai.org/data/hellaswag): HellaSwag is a dataset for studying grounded commonsense inference. It consists of 70k multiple-choice questions about grounded situations: each question comes from one of two domains, *activitynet* or *wikihow*, and has four answer choices about what might happen next in the scene. The correct answer is the (real) sentence describing the next event; the three incorrect answers are adversarially generated and human-verified so as to fool machines but not humans.
- [**MMLU**](https://arxiv.org/pdf/2009.03300.pdf): This dataset contains multiple-choice questions drawn from diverse fields of knowledge. The test covers subjects in the humanities, social sciences, hard sciences, and other areas that are essential for some people to learn.

Currently, our datasets support 26 languages: Russian, German, Chinese, French, Spanish, Italian, Dutch, Vietnamese, Indonesian, Arabic, Hungarian, Romanian, Danish, Slovak, Ukrainian, Catalan, Serbian, Croatian, Hindi, Bengali, Tamil, Nepali, Malayalam, Marathi, Telugu, and Kannada.

"""
94 changes: 94 additions & 0 deletions lm_eval/tasks/mlmm/multilingual_arc.py
@@ -0,0 +1,94 @@
"""
Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
https://arxiv.org/pdf/1803.05457.pdf

The ARC dataset consists of 7,787 science exam questions drawn from a variety
of sources, including science questions provided under license by a research
partner affiliated with AI2. These are text-only, English language exam questions
that span several grade levels as indicated in the files. Each question has a
multiple choice structure (typically 4 answer options). The questions are sorted
into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and
a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.

Homepage: https://allenai.org/data/arc
"""
from lm_eval.base import MultipleChoiceTask

_CITATION = """
@article{Clark2018ThinkYH,
title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
journal={ArXiv},
year={2018},
volume={abs/1803.05457}
}
"""

LANGS = "ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh".split(
","
)


def create_all_tasks():
"""Creates a dictionary of tasks from a list of subjects
:return: {task_name: task}
e.g. {arc_vi: Task, arc_bn: Task}
"""
return {f"mlmm_arc_{lang}": create_task(lang) for lang in LANGS}


def create_task(lang):
class ATest(MultilingualARC):
def __init__(self):
super().__init__(lang)

return ATest


class MultilingualARC(MultipleChoiceTask):
def __init__(self, lang, **kwargs):
self.VERSION = 0
self.lang = lang
self.DATASET_NAME = f"arc_{lang}"
self.DATASET_PATH = "malteos/m_arc"
self.NUM_FEW_SHOT = 25
super().__init__(**kwargs)

def has_training_docs(self):
return True

def has_validation_docs(self):
return True

def has_test_docs(self):
return True

def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs

def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])

def test_docs(self):
return map(self._process_doc, self.dataset["test"])

def _process_doc(self, doc):
# Map the answer key letter ("A"-"E") to a 0-based index into the choices list.
out_doc = {
"id": doc["id"],
"query": "Question: " + doc["question"] + "\nAnswer:",
"choices": doc["choices"],
"gold": ["A", "B", "C", "D", "E"].index(doc["answerKey"]),
}
return out_doc

def doc_to_text(self, doc):
return doc["query"]

def should_decontaminate(self):
return True

def doc_to_decontamination_query(self, doc):
return doc["query"]
100 changes: 100 additions & 0 deletions lm_eval/tasks/mlmm/multilingual_hellaswag.py
@@ -0,0 +1,100 @@
"""
HellaSwag: Can a Machine Really Finish Your Sentence?
https://arxiv.org/pdf/1905.07830.pdf

Hellaswag is a commonsense inference challenge dataset. Though its questions are
trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). This is
achieved via Adversarial Filtering (AF), a data collection paradigm wherein a
series of discriminators iteratively select an adversarial set of machine-generated
wrong answers. AF proves to be surprisingly robust. The key insight is to scale up
the length and complexity of the dataset examples towards a critical 'Goldilocks'
zone wherein generated text is ridiculous to humans, yet often misclassified by
state-of-the-art models.

Homepage: https://rowanzellers.com/hellaswag/
"""
import re
from lm_eval.base import MultipleChoiceTask

_CITATION = """
@inproceedings{zellers2019hellaswag,
title={HellaSwag: Can a Machine Really Finish Your Sentence?},
author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin},
booktitle ={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
year={2019}
}
"""

LANGS = "ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh".split(
","
)


def create_all_tasks():
"""Creates a dictionary of tasks from a list of subjects
:return: {task_name: task}
e.g. {hellaswag_vi: Task, hellaswag_en: Task}
"""
return {f"mlmm_hellaswag_{lang}": create_task(lang) for lang in LANGS}


def create_task(lang):
class ATest(HellaSwag):
def __init__(self):
super().__init__(lang)

return ATest


class HellaSwag(MultipleChoiceTask):
def __init__(self, lang, **kwargs):
self.VERSION = 1
self.lang = lang
self.DATASET_NAME = f"hellaswag_{lang}"
self.DATASET_PATH = "malteos/m_hellaswag"
self.NUM_FEW_SHOT = 0
super().__init__(**kwargs)

def has_training_docs(self):
return False

def has_validation_docs(self):
return True

def has_test_docs(self):
return False

def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs

def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])

def _process_doc(self, doc):
ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
out_doc = {
"query": self.preprocess(doc["activity_label"] + ": " + ctx),
"choices": [self.preprocess(ending) for ending in doc["endings"]],
"gold": int(doc["label"]),
}
return out_doc

@classmethod
def preprocess(cls, text):
text = text.strip()
# NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
text = text.replace(" [title]", ". ")
text = re.sub("\\[.*?\\]", "", text)
text = text.replace(" ", " ")
return text

def doc_to_text(self, doc):
return doc["query"]

def should_decontaminate(self):
return True

def doc_to_decontamination_query(self, doc):
return doc["query"]