Skip to content

Commit

Permalink
fix: Add Indic STS benchmark dataset (#524)
Browse files Browse the repository at this point in the history
* add Indic STS benchmark dataset

* update metadata for Indic STS benchmark

* add points for the contribution

* update reviewer name in points

* downsample the test set size

* Update mteb/tasks/STS/multilingual/IndicCrosslingualSTS.py

---------

Co-authored-by: Kenneth Enevoldsen <[email protected]>
  • Loading branch information
jaygala24 and KennethEnevoldsen authored Apr 23, 2024
1 parent 5370b44 commit 1f26615
Show file tree
Hide file tree
Showing 5 changed files with 465 additions and 0 deletions.
3 changes: 3 additions & 0 deletions docs/mmteb/points/524.jsonl
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{"GitHub": "jaygala24", "New dataset": 30}
{"GitHub": "digantamisra98", "New dataset": 20}
{"GitHub": "KennethEnevoldsen", "Review PR": 2}
1 change: 1 addition & 0 deletions mteb/tasks/STS/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from .jpn.JSTS import *
from .kor.KlueSTS import *
from .kor.KorSTS import *
from .multilingual.IndicCrosslingualSTS import *
from .multilingual.STS17CrosslingualSTS import *
from .multilingual.STS22CrosslingualSTS import *
from .multilingual.STSBenchmarkMultilingualSTS import *
Expand Down
109 changes: 109 additions & 0 deletions mteb/tasks/STS/multilingual/IndicCrosslingualSTS.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
from __future__ import annotations

import datasets

from mteb.abstasks.TaskMetadata import TaskMetadata

from ....abstasks import AbsTaskSTS, MultilingualTask

# Language pairs covered by the benchmark: maps the HF dataset config name
# ("en-<iso639-1>") to the MTEB language codes [English, Indic] in
# ISO 639-3 + script form. Used as both `eval_langs` and the per-config
# loading keys in `load_data`.
_LANGUAGES = {
    "en-as": ["eng-Latn", "asm-Beng"],
    "en-bn": ["eng-Latn", "ben-Beng"],
    "en-gu": ["eng-Latn", "guj-Gujr"],
    "en-hi": ["eng-Latn", "hin-Deva"],
    "en-kn": ["eng-Latn", "kan-Knda"],
    "en-ml": ["eng-Latn", "mal-Mlym"],
    "en-mr": ["eng-Latn", "mar-Deva"],
    "en-or": ["eng-Latn", "ory-Orya"],
    "en-pa": ["eng-Latn", "pan-Guru"],
    "en-ta": ["eng-Latn", "tam-Taml"],
    "en-te": ["eng-Latn", "tel-Telu"],
    "en-ur": ["eng-Latn", "urd-Arab"],
}


def categorize_float(float_value):
    """Round *float_value* to the nearest integer, with ties rounding up.

    Unlike the builtin ``round`` (banker's rounding), a value exactly
    halfway between two integers is mapped to the larger one.
    """
    base = int(float_value)
    # Closer to the lower bound -> keep it; ties (fraction == 0.5) go up.
    return base if float_value - base < 0.5 else base + 1


class IndicCrosslingualSTS(AbsTaskSTS, MultilingualTask):
    """Cross-lingual semantic textual similarity between English and 12 Indic languages.

    Each language pair in ``_LANGUAGES`` is loaded as its own HF dataset
    config; only the ``test`` split is evaluated, downsampled to 256
    label-stratified examples per pair.
    """

    metadata = TaskMetadata(
        name="IndicCrosslingualSTS",
        dataset={
            "path": "jaygala24/indic_sts",
            "revision": "16abc16bea9e38262a8a3a74fd71ce2da51a5c3b",
        },
        description="This is a Semantic Textual Similarity testset between English and 12 high-resource Indic languages.",
        reference="https://huggingface.co/datasets/jaygala24/indic_sts",
        type="STS",
        category="s2s",
        eval_splits=["test"],
        eval_langs=_LANGUAGES,
        main_score="cosine_spearman",
        date=("2021-04-30", "2021-06-09"),
        form=["written", "spoken"],
        domains=["News", "Non-fiction", "Web", "Spoken", "Government"],
        task_subtypes=[],
        license="CC0",
        socioeconomic_status="mixed",
        annotations_creators="expert-annotated",
        dialect=[],
        text_creation="created",
        bibtex_citation="""@article{10.1162/tacl_a_00452,
    author = {Ramesh, Gowtham and Doddapaneni, Sumanth and Bheemaraj, Aravinth and Jobanputra, Mayank and AK, Raghavan and Sharma, Ajitesh and Sahoo, Sujit and Diddee, Harshita and J, Mahalakshmi and Kakwani, Divyanshu and Kumar, Navneet and Pradeep, Aswin and Nagaraj, Srihari and Deepak, Kumar and Raghavan, Vivek and Kunchukuttan, Anoop and Kumar, Pratyush and Khapra, Mitesh Shantadevi},
    title = "{Samanantar: The Largest Publicly Available Parallel Corpora Collection for 11 Indic Languages}",
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {10},
    pages = {145-162},
    year = {2022},
    month = {02},
    issn = {2307-387X},
    doi = {10.1162/tacl_a_00452},
    url = {https://doi.org/10.1162/tacl\_a\_00452},
    eprint = {https://direct.mit.edu/tacl/article-pdf/doi/10.1162/tacl\_a\_00452/1987010/tacl\_a\_00452.pdf},
}""",
        n_samples={"test": 10020},
        avg_character_length={"test": 76.22},
    )

    @property
    def metadata_dict(self) -> dict[str, str]:
        # Gold similarity scores in the source data lie in [0, 5]; the STS
        # evaluator uses these bounds to normalize them.
        metadata_dict = super().metadata_dict
        metadata_dict["min_score"] = 0
        metadata_dict["max_score"] = 5
        return metadata_dict

    def load_data(self, **kwargs):
        """Load dataset from HuggingFace hub

        Loads one HF dataset config per language pair (the pair code, e.g.
        "en-hi", doubles as the config name), then applies
        ``dataset_transform`` and marks the task as loaded.
        """
        if self.data_loaded:
            return

        self.dataset = {}
        for lang in self.langs:
            # Dataset path and pinned revision come from the task metadata.
            self.dataset[lang] = datasets.load_dataset(
                name=lang, **self.metadata_dict["dataset"]
            )
        self.dataset_transform()
        self.data_loaded = True

    def dataset_transform(self) -> None:
        # Convert to standard format
        for lang in self.langs:
            # Rename columns to the sentence1/sentence2 schema AbsTaskSTS expects.
            self.dataset[lang] = self.dataset[lang].rename_columns(
                {"english_sentence": "sentence1", "indic_sentence": "sentence2"}
            )
            # Bucket the continuous score into an integer class label so the
            # split below can stratify on it.
            # NOTE(review): builtin round() uses banker's rounding (ties to
            # even), whereas the module-level categorize_float helper (ties
            # up) is defined but never used — confirm which was intended.
            self.dataset[lang] = (
                self.dataset[lang]
                .map(lambda x: {"label": round(x["score"])})
                .class_encode_column("label")
            )
            # Downsample the test split to 256 examples, stratified by label,
            # with a fixed seed for reproducibility.
            self.dataset[lang]["test"] = self.dataset[lang]["test"].train_test_split(
                test_size=256,
                seed=self.seed,
                stratify_by_column="label",
            )["test"]
176 changes: 176 additions & 0 deletions results/intfloat__multilingual-e5-small/IndicCrosslingualSTS.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,176 @@
{
"dataset_revision": "16abc16bea9e38262a8a3a74fd71ce2da51a5c3b",
"mteb_dataset_name": "IndicCrosslingualSTS",
"mteb_version": "1.7.15",
"test": {
"en-as": {
"cos_sim": {
"pearson": 0.47555549489993254,
"spearman": 0.4576670514187138
},
"euclidean": {
"pearson": 0.46661486840777633,
"spearman": 0.4576670514187138
},
"manhattan": {
"pearson": 0.4603656994362554,
"spearman": 0.44882988659171386
}
},
"en-bn": {
"cos_sim": {
"pearson": 0.5373161163227228,
"spearman": 0.5482828600428461
},
"euclidean": {
"pearson": 0.5209027161136183,
"spearman": 0.5482828600428461
},
"manhattan": {
"pearson": 0.5216392960655964,
"spearman": 0.5463201969114684
}
},
"en-gu": {
"cos_sim": {
"pearson": 0.49292981540590064,
"spearman": 0.5195144255851953
},
"euclidean": {
"pearson": 0.49078110147353876,
"spearman": 0.5195144255851953
},
"manhattan": {
"pearson": 0.48258082887043946,
"spearman": 0.51158171573939
}
},
"en-hi": {
"cos_sim": {
"pearson": 0.6378705358195362,
"spearman": 0.644707977928238
},
"euclidean": {
"pearson": 0.6199983014282475,
"spearman": 0.644707977928238
},
"manhattan": {
"pearson": 0.6148348771501904,
"spearman": 0.6343584330238747
}
},
"en-kn": {
"cos_sim": {
"pearson": 0.5677754956742882,
"spearman": 0.5620861501733149
},
"euclidean": {
"pearson": 0.5275190534412112,
"spearman": 0.5620861501733149
},
"manhattan": {
"pearson": 0.5310702474583724,
"spearman": 0.5607695921081409
}
},
"en-ml": {
"cos_sim": {
"pearson": 0.5515655939071619,
"spearman": 0.5831765071447772
},
"euclidean": {
"pearson": 0.553434510164889,
"spearman": 0.5831765071447772
},
"manhattan": {
"pearson": 0.5541340488641183,
"spearman": 0.5770287985150414
}
},
"en-mr": {
"cos_sim": {
"pearson": 0.4488195142274059,
"spearman": 0.44069492362689155
},
"euclidean": {
"pearson": 0.43461433557514006,
"spearman": 0.4406948448232889
},
"manhattan": {
"pearson": 0.4343562798051149,
"spearman": 0.439004147346604
}
},
"en-or": {
"cos_sim": {
"pearson": 0.07827341866265802,
"spearman": 0.13091202976340055
},
"euclidean": {
"pearson": 0.08216597605128603,
"spearman": 0.13091202976340055
},
"manhattan": {
"pearson": 0.08004519439482959,
"spearman": 0.13397566048836274
}
},
"en-pa": {
"cos_sim": {
"pearson": 0.5415264655963459,
"spearman": 0.5169130851062151
},
"euclidean": {
"pearson": 0.5336581084547132,
"spearman": 0.5169130851062151
},
"manhattan": {
"pearson": 0.5287421574559961,
"spearman": 0.5122154010586134
}
},
"en-ta": {
"cos_sim": {
"pearson": 0.48112639105157445,
"spearman": 0.5020988459087952
},
"euclidean": {
"pearson": 0.47002156849941895,
"spearman": 0.5020988459087952
},
"manhattan": {
"pearson": 0.4709852200774878,
"spearman": 0.5042777203929921
}
},
"en-te": {
"cos_sim": {
"pearson": 0.43500374615692305,
"spearman": 0.4544249544537326
},
"euclidean": {
"pearson": 0.42175556833077615,
"spearman": 0.4544249544537326
},
"manhattan": {
"pearson": 0.40947219839108917,
"spearman": 0.4459731456040252
}
},
"en-ur": {
"cos_sim": {
"pearson": 0.1407411406292167,
"spearman": 0.1513164422185654
},
"euclidean": {
"pearson": 0.14318011150983007,
"spearman": 0.1513164422185654
},
"manhattan": {
"pearson": 0.1456973138190262,
"spearman": 0.14856313516465242
}
},
"evaluation_time": 4.53
}
}
Loading

0 comments on commit 1f26615

Please sign in to comment.