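"""Hugging Face `datasets` loading script for the llm-jp-eval benchmark (v1.3.0)."""
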
from __future__ import annotations

import json
from pathlib import Path
from typing import Generator

import datasets

from .base import BaseDatasetProcessor
from .chabsa import ChabsaDatasetProcessor
from .coreference import WikipediaCoreferenceDatasetProcessor
from .dependency import WikipediaDependencyDatasetProcessor
from .jamp import JAMPDatasetProcessor
from .janli import JaNLIDatasetProcessor
from .jcommonsenseqa import JCommonsenseQADatasetProcessor
from .jemhopqa import JEMHopQADatasetProcessor
from .jnli import JNLIDatasetProcessor
from .jsem import JSeMDatasetProcessor
from .jsick import JSICKDatasetProcessor
from .jsquad import JSQuADDatasetProcessor
from .jsts import JSTSDatasetProcessor
from .mawps import MawpsDatasetProcessor
from .ner import WikipediaNERDatasetProcessor
from .niilc import NIILCDatasetProcessor
from .pas import WikipediaPASDatasetProcessor
from .reading import WikipediaReadingDatasetProcessor
from .wiki_base import WikipediaBaseDatasetProcessor
_CITATION = ""
_DESCRIPTION = "These are datasets including the benchmark 'llm-jp-eval.'"
_HOMEPAGE = "https://github.com/llm-jp/llm-jp-eval/tree/v1.3.0"
_LICENSE = "This work is license under Apache-2.0 license"
_VERSION = datasets.Version("1.3.0")
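# BuilderConfig extended with `output_dir`: the local directory where the
# dataset processors write their preprocessed files.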
class LlmJpEvalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = _VERSION,
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        output_dir: str = "./data_llm-jp-eval",
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.output_dir = output_dir


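# One config per llm-jp-eval task; the config name selects the matching
# processor in _split_generators below.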
class LlmJpEval(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        LlmJpEvalConfig(name="jamp"),
        LlmJpEvalConfig(name="janli"),
        LlmJpEvalConfig(name="jnli"),
        LlmJpEvalConfig(name="jsem"),
        LlmJpEvalConfig(name="jsick"),
        LlmJpEvalConfig(name="jemhopqa"),
        LlmJpEvalConfig(name="niilc"),
        LlmJpEvalConfig(name="jsquad"),
        LlmJpEvalConfig(name="jcommonsenseqa"),
        LlmJpEvalConfig(name="chabsa"),
        LlmJpEvalConfig(name="wiki_reading"),
        LlmJpEvalConfig(name="wiki_ner"),
        LlmJpEvalConfig(name="wiki_dependency"),
        LlmJpEvalConfig(name="wiki_pas"),
        LlmJpEvalConfig(name="wiki_coreference"),
        LlmJpEvalConfig(name="mawps"),
        LlmJpEvalConfig(name="jsts"),
    ]

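    # Every task is flattened to the same schema: an "input" prompt string and
    # its expected "output" string.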
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "output": datasets.Value("string"),
                }
            ),
        )

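    # Splits are not fetched through dl_manager; the matching processor builds
    # the data locally and the generated JSON files are read back.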
    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        output_dir = Path(self.config.output_dir)
        version_name = self.config.version.version_str
        dataset_name = self.config.name
        processors: dict[str, type[BaseDatasetProcessor]] = {
            "jamp": JAMPDatasetProcessor,
            "janli": JaNLIDatasetProcessor,
            "jnli": JNLIDatasetProcessor,
            "jsem": JSeMDatasetProcessor,
            "jsick": JSICKDatasetProcessor,
            "jemhopqa": JEMHopQADatasetProcessor,
            "niilc": NIILCDatasetProcessor,
            "jsquad": JSQuADDatasetProcessor,
            "jcommonsenseqa": JCommonsenseQADatasetProcessor,
            "chabsa": ChabsaDatasetProcessor,
            "wiki_reading": WikipediaReadingDatasetProcessor,
            "wiki_ner": WikipediaNERDatasetProcessor,
            "wiki_dependency": WikipediaDependencyDatasetProcessor,
            "wiki_pas": WikipediaPASDatasetProcessor,
            "wiki_coreference": WikipediaCoreferenceDatasetProcessor,
            "mawps": MawpsDatasetProcessor,
            "jsts": JSTSDatasetProcessor,
        }
        processor = processors[dataset_name]
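        # pipeline() downloads and formats the raw data, writing train/dev/test
        # JSON files under <output_dir>/<version>/evaluation/.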
        processor(output_dir, version_name).pipeline()
        eval_dir = output_dir / version_name / "evaluation"
        with (eval_dir / "train" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            train_data = json.load(f)
        with (eval_dir / "dev" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            validation_data = json.load(f)
        with (eval_dir / "test" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            test_data = json.load(f)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"data": train_data["samples"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": validation_data["samples"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"data": test_data["samples"]}
            ),
        ]

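    # Each sample is already a {"input": ..., "output": ...} dict, so it is
    # yielded as-is with its index as the example id.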
    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
        for i, d in enumerate(data):
            yield i, d
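

# Usage sketch (an illustration, not part of the original script): with this
# file and its processor modules saved locally, a single task can be loaded by
# name; the first call runs the processor pipeline and writes to output_dir.
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/llm-jp-eval.py", name="jnli")
#     print(ds["train"][0])  # {"input": "...", "output": "..."}
#
# Note: depending on the installed `datasets` version, trust_remote_code=True
# may be required to execute loader scripts such as this one.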