Dataset: qanastek/HoC
Tasks: Text Classification
Modalities: Text
Sub-tasks: multi-class-classification
Languages: English
Size: 10K - 100K
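
The snippet below loads the corpus with the `datasets` library, prints one validation example, and then reports how many labels each example carries across the train, validation, and test splits, flagging examples with no label or with four or more labels.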
from collections import Counter

from datasets import load_dataset

# If you need to force-clear / bypass the cache:
# from datasets import set_caching_enabled
# set_caching_enabled(False)
# (On recent versions of `datasets`, prefer `disable_caching()` instead.)

# source = "HoC.py"  # local loading script
source = "qanastek/HoC"

dataset = load_dataset(source)
# dataset = load_dataset(source, "HoC")
print(dataset)

# Inspect a single validation example
print(dataset["validation"][0])

print()
print("#" * 100)
print()

# Count how many labels each example carries and print the outliers
# (no label at all, or four or more labels).
lengths = []
for split in ("train", "validation", "test"):
    for e in dataset[split]:
        l = len(e["label"])
        if l == 0 or l >= 4:
            print(l, " => ", e, "\n")
        lengths.append(l)

# Distribution of label counts across all splits
print(Counter(lengths))
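
Since `label` is a list of class indices per example, a per-class frequency count can be more informative than the label-count distribution above. A minimal sketch, assuming the `label` column is declared as a `Sequence` of `ClassLabel` so the index-to-name mapping is exposed through `features`:

from collections import Counter

from datasets import load_dataset

dataset = load_dataset("qanastek/HoC")

# Assumption: dataset["train"].features["label"] is a Sequence(ClassLabel(...));
# if so, `.feature.int2str()` maps integer ids back to class names.
label_feature = dataset["train"].features["label"].feature

per_class = Counter(
    label_feature.int2str(i)
    for example in dataset["train"]
    for i in example["label"]
)
print(per_class.most_common())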