Datasets:
Tasks: Text Generation
Modalities: Text
Formats: parquet
Sub-tasks: language-modeling
Languages: code
Size: 100K - 1M
License:
add preprocessing file
- preprocessing.py +49 -0
preprocessing.py
ADDED
@@ -0,0 +1,49 @@
from tqdm import tqdm
from datasets import load_dataset, Dataset


def parse_data(ds):
    """Parse data into markdown-code pairs."""
    markdowns = []
    code_snippets = []
    paths = []
    repo_names = []
    licenses = []
    for i in tqdm(range(len(ds))):
        inner_markdowns = []
        inner_code_snippets = []
        types = ds[i]["types"]
        path = ds[i]["path"]
        repo = ds[i]["repo_name"]
        license = ds[i]["license"]
        if types[0] == "code":
            # drop the first code cell so the notebook starts with markdown
            cells = ds[i]["cells"][1:]
            types = types[1:]
        else:
            # drop the first two cells (markdown followed by code):
            # the first markdown cell of a notebook is often a long description of the whole notebook
            cells = ds[i]["cells"][2:]
            types = ds[i]["types"][2:]
        if len(cells) % 2 == 0:
            inner_markdowns = [cells[j] for j in range(len(cells)) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 1) if j % 2 == 0]
        else:
            # drop the last markdown cell, which has no code cell following it
            inner_markdowns = [cells[j] for j in range(len(cells) - 1) if j % 2 == 0]
            inner_code_snippets = [cells[j + 1] for j in range(len(cells) - 2) if j % 2 == 0]

        markdowns.extend(inner_markdowns)
        code_snippets.extend(inner_code_snippets)

        paths.extend([path] * len(inner_markdowns))
        repo_names.extend([repo] * len(inner_markdowns))
        licenses.extend([license] * len(inner_markdowns))
    return markdowns, code_snippets, paths, repo_names, licenses


if __name__ == "__main__":
    ds = load_dataset("codeparrot/github-jupyter-parsed", split="train")
    print("Parsing data...")
    markdowns, code_snippets, paths, repo_names, licenses = parse_data(ds)
    data = {"markdown": markdowns, "code": code_snippets, "path": paths, "repo_name": repo_names, "license": licenses}
    parsed_data = Dataset.from_dict(data)
    parsed_data.push_to_hub("codeparrot/github-jupyter-parsed-v2")
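
As a sanity check of the pairing logic, here is a minimal sketch (not part of the commit) that runs parse_data on a single hand-written record; the field names mirror the ones the script reads, and the cell contents are invented for illustration. It assumes parse_data from preprocessing.py above is in scope.

# Toy record with the same fields the script reads; all values are made up.
toy = {
    "cells": ["# Intro", "import os", "Load the data", "df = load()", "Plot it", "df.plot()"],
    "types": ["markdown", "code", "markdown", "code", "markdown", "code"],
    "path": "example.ipynb",
    "repo_name": "user/repo",
    "license": "mit",
}

# parse_data only needs len() and integer indexing, so a plain list of dicts works here.
markdowns, code_snippets, paths, repo_names, licenses = parse_data([toy])

# The notebook starts with markdown, so its first markdown+code pair is dropped
# and the remaining cells are zipped into (markdown, code) pairs:
# markdowns     == ["Load the data", "Plot it"]
# code_snippets == ["df = load()", "df.plot()"]
# paths         == ["example.ipynb", "example.ipynb"]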