multi_re_qa / dataset_infos.json
Update files from the datasets library (from 1.2.0) · 9c02eb9
{"SearchQA": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "SearchQA", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 183902877, "num_examples": 3163801, "dataset_name": "multi_re_qa"}, "validation": {"name": "validation", "num_bytes": 26439174, "num_examples": 454836, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/train/SearchQA/candidates.json.gz": {"num_bytes": 32368716, "checksum": "adf6fe37aff7929b7be33fb105571b80db89adc3cee2093c8357b678c1b4c76c"}, "https://github.com/google-research-datasets/MultiReQA/raw/master/data/dev/SearchQA/candidates.json.gz": {"num_bytes": 4623243, "checksum": "00c361a17babd40b9144a570bbadacba37136b638f0a1f55c49fe58fca1606a9"}}, "download_size": 36991959, "post_processing_size": null, "dataset_size": 210342051, "size_in_bytes": 247334010}, "TriviaQA": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "TriviaQA", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107326326, "num_examples": 1893674, "dataset_name": "multi_re_qa"}, "validation": {"name": "validation", "num_bytes": 13508062, "num_examples": 238339, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/train/TriviaQA/candidates.json.gz": {"num_bytes": 19336595, "checksum": "ff43a7ec9243f4c5631ec50fa799f0dfbcf4dec2b4116da3aaacffe0b7fe22ee"}, "https://github.com/google-research-datasets/MultiReQA/raw/master/data/dev/TriviaQA/candidates.json.gz": {"num_bytes": 2413807, "checksum": "bf2f41e4f85fcdc163a6cb2ad7f1f711c185463ee701b4e29c9da5c19d5da641"}}, "download_size": 21750402, "post_processing_size": null, "dataset_size": 120834388, "size_in_bytes": 142584790}, "HotpotQA": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "HotpotQA", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29516866, "num_examples": 508879, "dataset_name": "multi_re_qa"}, "validation": {"name": "validation", "num_bytes": 3027229, "num_examples": 52191, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/train/HotpotQA/candidates.json.gz": {"num_bytes": 5760488, "checksum": "1e19145a13aea9101edaaa3e79f19518b9bf0b1539e1912f5a4bec8c406bcbbc"}, "https://github.com/google-research-datasets/MultiReQA/raw/master/data/dev/HotpotQA/candidates.json.gz": {"num_bytes": 582901, "checksum": "f359dde781dc7772d817c81d1f1c28fcdedb8858b4502a7bd7234d1da5e10395"}}, "download_size": 6343389, "post_processing_size": null, "dataset_size": 32544095, "size_in_bytes": 38887484}, "SQuAD": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "SQuAD", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16828974, "num_examples": 95659, "dataset_name": "multi_re_qa"}, "validation": {"name": "validation", "num_bytes": 2012997, "num_examples": 10642, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/train/SQuAD/candidates.json.gz": {"num_bytes": 2685384, "checksum": "efdcc6576283194be5ce8cb1cc51ffc15200e8b116479b4eda06b2e4b6b77bd0"}, "https://github.com/google-research-datasets/MultiReQA/raw/master/data/dev/SQuAD/candidates.json.gz": {"num_bytes": 318262, "checksum": "dc0fa9e536afa6969212cc5547dced39147ac93e007438464575ef4038dfd512"}}, "download_size": 3003646, "post_processing_size": null, "dataset_size": 18841971, "size_in_bytes": 21845617}, "NaturalQuestions": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "NaturalQuestions", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28732767, "num_examples": 448355, "dataset_name": "multi_re_qa"}, "validation": {"name": "validation", "num_bytes": 1418124, "num_examples": 22118, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/train/NaturalQuestions/candidates.json.gz": {"num_bytes": 5794887, "checksum": "dc39392d7a4995024a3d8fc127607e2cdea9081ed17c7c014bb5ffca220474da"}, "https://github.com/google-research-datasets/MultiReQA/raw/master/data/dev/NaturalQuestions/candidates.json.gz": {"num_bytes": 329600, "checksum": "4e9a422272d399206bc20438435fb60d4faddd4dc901db760d97b614cc082dd5"}}, "download_size": 6124487, "post_processing_size": null, "dataset_size": 30150891, "size_in_bytes": 36275378}, "BioASQ": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "BioASQ", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 766190, "num_examples": 14158, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/test/BioASQ/candidates.json.gz": {"num_bytes": 156649, "checksum": "4312adbb038532564f4178018c32c22b46d5d2a0a896900b72bc6f4df3ec0d99"}}, "download_size": 156649, "post_processing_size": null, "dataset_size": 766190, "size_in_bytes": 922839}, "RelationExtraction": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "RelationExtraction", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217870, "num_examples": 3301, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/test/RelationExtraction/candidates.json.gz": {"num_bytes": 73019, "checksum": "23fcafe68a91367928a537e0220d2e52e9c5a662dd9976c102267640566b2f34"}}, "download_size": 73019, "post_processing_size": null, "dataset_size": 217870, "size_in_bytes": 290889}, "TextbookQA": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. 
Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "TextbookQA", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4182675, "num_examples": 71147, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/test/TextbookQA/candidates.json.gz": {"num_bytes": 704602, "checksum": "ac7a7dbae67afcce708c7ba6867991d8410ab92a8884964ec077898672f97208"}}, "download_size": 704602, "post_processing_size": null, "dataset_size": 4182675, "size_in_bytes": 4887277}, "DuoRC": {"description": "MultiReQA contains the sentence boundary annotation from eight publicly available QA datasets including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, BioASQ, RelationExtraction, and TextbookQA. Five of these datasets, including SearchQA, TriviaQA, HotpotQA, NaturalQuestions, SQuAD, contain both training and test data, and three, including BioASQ, RelationExtraction, TextbookQA, contain only the test data", "citation": "@misc{m2020multireqa,\n title={MultiReQA: A Cross-Domain Evaluation for Retrieval Question Answering Models},\n author={Mandy Guo and Yinfei Yang and Daniel Cer and Qinlan Shen and Noah Constant},\n year={2020},\n eprint={2005.02507},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}", "homepage": "https://github.com/google-research-datasets/MultiReQA", "license": "", "features": {"candidate_id": {"dtype": "string", "id": null, "_type": "Value"}, "response_start": {"dtype": "int32", "id": null, "_type": "Value"}, "response_end": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_re_qa", "config_name": "DuoRC", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1483518, "num_examples": 5525, "dataset_name": "multi_re_qa"}}, "download_checksums": {"https://github.com/google-research-datasets/MultiReQA/raw/master/data/test/DuoRC/candidates.json.gz": {"num_bytes": 97625, "checksum": "0ce13953cf96a2f9d2f9a0b0dee7249c98dc95690a00e34236059f59f5ebc674"}}, "download_size": 97625, "post_processing_size": null, "dataset_size": 1483518, "size_in_bytes": 1581143}}
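
Each top-level key in this file ("SearchQA", "TriviaQA", ..., "DuoRC") is a loadable config of the "multi_re_qa" builder. The following is a minimal sketch, assuming a 1.x-era version of the datasets library (matching the commit message above); the config, split, and feature names are taken verbatim from this file:

from datasets import load_dataset

# SearchQA carries "train" and "validation" splits per the "splits" metadata above.
search_qa = load_dataset("multi_re_qa", "SearchQA")
print(search_qa["train"].features)   # candidate_id: string; response_start, response_end: int32
print(search_qa["train"].num_rows)   # 3163801, matching "num_examples" recorded above

# BioASQ, RelationExtraction, TextbookQA, and DuoRC record only a "test" split.
bio_asq = load_dataset("multi_re_qa", "BioASQ")
print(bio_asq["test"][0])

Newer versions of the library may require the dataset under a namespaced id or with remote code enabled; the snippet reflects the layout this file was generated for.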
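
Each entry under "download_checksums" pairs a source URL with the archive size in bytes and a 64-hex-digit checksum, which appears to be a SHA-256 digest of the raw downloaded bytes; the datasets library can use these to verify downloads. A standalone verification sketch, using the BioASQ shard recorded above:

import hashlib
import urllib.request

# URL, byte count, and checksum are copied verbatim from "download_checksums" above.
URL = "https://github.com/google-research-datasets/MultiReQA/raw/master/data/test/BioASQ/candidates.json.gz"
EXPECTED_BYTES = 156649
EXPECTED_SHA256 = "4312adbb038532564f4178018c32c22b46d5d2a0a896900b72bc6f4df3ec0d99"

raw = urllib.request.urlopen(URL).read()
assert len(raw) == EXPECTED_BYTES, f"size mismatch: got {len(raw)} bytes"
assert hashlib.sha256(raw).hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("BioASQ candidates.json.gz verified")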