---
license: apache-2.0
task_categories:
  - text-classification
language:
  - en
---
! pip install python-Levenshtein
! pip install fuzzywuzzy

import pandas as pd
from datasets import load_dataset
from fuzzywuzzy import fuzz

dataset_name_list = [
    "mteb/sts12-sts",
    "mteb/sts13-sts",
    "mteb/sts14-sts",
    "mteb/sts15-sts",
    "mteb/sts16-sts",
    "mteb/stsbenchmark-sts",
    "mteb/sickr-sts",
]
# Strip the "mteb/" prefix and "-sts" suffix to get short dataset names.
dataset_dict = {name[len("mteb/"):-len("-sts")]: load_dataset(name) for name in dataset_name_list}
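
# Quick sanity check (illustrative, not part of the original build script):
# list the splits and row counts that came back for each dataset.
for name, splits in dataset_dict.items():
    print(name, {split: len(ds) for split, ds in splits.items()})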


# Flatten every dataset/split into a single DataFrame, tagging each row with its source.
df_list = []
for dataset_name, splits in dataset_dict.items():
    for split_name, dataset in splits.items():
        df = pd.DataFrame(dataset)
        df['dataset'] = dataset_name
        df['split'] = split_name
        df = df[['dataset', 'split', 'sentence1', 'sentence2', 'score']]
        df_list.append(df)
df = pd.concat(df_list, axis=0)
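
# A quick look at the combined frame (illustrative; the exact counts depend on
# the dataset versions pulled from the Hub).
print(df.groupby(['dataset', 'split']).size())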

def text_sim(sent0, sent1):
    """Token-level Jaccard similarity for a pair of sentences (or paired lists of sentences)."""
    is_str = False
    if isinstance(sent0, str):
        sent0 = [sent0]
        sent1 = [sent1]
        is_str = True
    scores = []
    for s1, s2 in zip(sent0, sent1):
        set1 = set(s1.split(' '))
        set2 = set(s2.split(' '))

        # intersection and union of the two token sets
        intersection = set1.intersection(set2)
        union = set1.union(set2)

        # Jaccard similarity = |intersection| / |union|
        similarity = len(intersection) / len(union)

        scores.append(similarity)
    return scores[0] if is_str else scores

print(text_sim('hello', 'hello world'))  # Jaccard of {'hello'} and {'hello', 'world'} -> 0.5
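
# text_sim also accepts paired lists and returns one score per pair
# (illustrative example, not from the original script):
print(text_sim(['a man is playing guitar', 'the cat sat on the mat'],
               ['a man plays a guitar', 'a dog sat on the mat']))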

# Lexical-overlap baselines (rounded to two decimals) and the gold score divided by 5.
df['text_sim'] = df.apply(lambda row: int(text_sim(row['sentence1'].lower(), row['sentence2'].lower()) * 100 + 0.5) / 100, axis=1)
df['fuzz_sim'] = df.apply(lambda row: fuzz.ratio(row['sentence1'].lower(), row['sentence2'].lower()) / 100, axis=1)
df['scaled_score'] = df['score'] / 5
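
# One possible use of these columns (a sketch, assuming scipy is installed):
# compare each lexical baseline against the gold score with Spearman correlation.
from scipy.stats import spearmanr

for name, group in df.groupby('dataset'):
    rho_text = spearmanr(group['text_sim'], group['scaled_score']).correlation
    rho_fuzz = spearmanr(group['fuzz_sim'], group['scaled_score']).correlation
    print(f"{name}: text_sim rho={rho_text:.3f}, fuzz_sim rho={rho_fuzz:.3f}")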