mathiaszinnen committed
Commit b5421ed · 1 Parent(s): fb6b308

refactor dataset

Files changed (6)
  1. .gitignore +2 -0
  2. download_utils.py +45 -0
  3. meta/meta.csv +0 -0
  4. meta/meta_test.csv +0 -0
  5. meta/meta_train.csv +0 -0
  6. odor.py +34 -24
.gitignore CHANGED
@@ -1,2 +1,4 @@
 .vscode/
 .ipynb_checkpoints/
+*.jpg
+__pycache__
download_utils.py ADDED
@@ -0,0 +1,45 @@
+import os
+import time
+
+import requests
+import pandas as pd
+from tqdm import tqdm
+from multiprocessing.pool import ThreadPool
+from multiprocessing import cpu_count
+from pathlib import Path
+from requests.exceptions import MissingSchema, Timeout, ConnectionError, InvalidSchema
+
+
+def download_one(entry, overwrite=False):
+    fn, uri, target_pth, retries = entry
+    fn = fn.replace("/", "_")
+    path = f'{target_pth}/{fn}'
+    if os.path.exists(path) and not overwrite:
+        return fn
+
+    for i in range(retries):
+        try:
+            r = requests.get(uri, stream=True, timeout=50)
+        except (MissingSchema, Timeout, ConnectionError, InvalidSchema):
+            time.sleep(i)
+            continue
+
+        if r.status_code == 200:
+            with open(path, 'wb') as f:
+                for chunk in r:
+                    f.write(chunk)
+            return fn
+        else:
+            time.sleep(i)
+            continue
+
+    return fn
+
+
+def download_all(metadata_pth, target_pth, retries=3):
+    df = pd.read_csv(metadata_pth)
+    entries = [[*x, target_pth, retries] for x in df[['File Name', 'Image Credits']].values]
+    n_processes = max(1, cpu_count() - 1)
+    with ThreadPool(n_processes) as p:
+        results = list(tqdm(p.imap(download_one, entries), total=len(entries)))
+    return results
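
For reference, a minimal sketch of running the new helper on its own, assuming the repo's meta/meta.csv and an illustrative local images/ directory (download_one expects the target directory to already exist; the directory name here is not part of the commit):

    import os
    from download_utils import download_all

    imgs_dir = 'images'  # illustrative target directory
    os.makedirs(imgs_dir, exist_ok=True)

    # Fetches each 'Image Credits' URL from the CSV and stores it under its
    # 'File Name' (slashes replaced with underscores). Already-present files
    # are skipped; failed requests are retried with a linear backoff. The
    # return value is one file name per metadata row, whether or not the
    # download succeeded, so check the directory contents afterwards.
    results = download_all('meta/meta.csv', imgs_dir, retries=3)
    print(f'processed {len(results)} metadata entries')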
meta/meta.csv CHANGED
The diff for this file is too large to render. See raw diff
 
meta/meta_test.csv DELETED
The diff for this file is too large to render. See raw diff
 
meta/meta_train.csv DELETED
The diff for this file is too large to render. See raw diff
 
odor.py CHANGED
@@ -21,6 +21,8 @@ import os
 import pandas as pd
 
 import datasets
+import multiprocessing
+from download_utils import download_all
 
 
 _CITATION = """\
@@ -72,34 +74,32 @@ class ODOR(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        # df_meta = pd.read_csv('meta/meta.csv')
-        # dl_manager.download(df_meta['Image Credits'].values)
-        dl_manager.download_and_extract(_URL)
-
+        imgs_dir = f'{self.cache_dir}/images'  # probably better to use the huggingface cache dir here
+        csv_pth = 'meta/meta.csv'
+        if not os.path.isdir(imgs_dir):
+            os.makedirs(imgs_dir)
+        img_pths = download_all(csv_pth, imgs_dir)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "annotation_file_path": "annotations/train.json",
-                    "metadata_file_path": "meta/meta_train.csv"
+                    "metadata_file_path": csv_pth,
+                    "img_dir": imgs_dir
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "annotation_file_path": "annotations/test.json",
-                    "metadata_file_path": "meta/meta_test.csv"
+                    "metadata_file_path": csv_pth,
+                    "img_dir": imgs_dir
                 },
             ),
         ]
 
-    def _generate_examples(self, annotation_file_path, metadata_file_path):
-        return None
-        # load metadata
-        # meta_df = pd.read_csv(metadata_file_path)
-
-        # files = download_images(meta_df)
-
+    def _generate_examples(self, annotation_file_path, metadata_file_path, img_dir):
 
         def process_annot(annot, category_id_to_category):
             return {
@@ -111,18 +111,18 @@ class ODOR(datasets.GeneratorBasedBuilder):
 
         image_id_to_image = {}
         idx = 0
-        # This loop relies on the ordering of the files in the archive:
-        #   Annotation files come first, then the images.
-        for path, f in files:
+
+        with open(annotation_file_path) as f:
+            annotations = json.load(f)
+        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+        image_id_to_annotations = collections.defaultdict(list)
+        for annot in annotations["annotations"]:
+            image_id_to_annotations[annot["image_id"]].append(annot)
+        image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
+
+        for path in os.listdir(img_dir):
             file_name = os.path.basename(path)
-            if path == annotation_file_path:
-                annotations = json.load(f)
-                category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-                image_id_to_annotations = collections.defaultdict(list)
-                for annot in annotations["annotations"]:
-                    image_id_to_annotations[annot["image_id"]].append(annot)
-                image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
-            elif file_name in image_id_to_image:
+            if file_name in image_id_to_image:
                 image = image_id_to_image[file_name]
                 objects = [
                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
@@ -135,3 +135,13 @@ class ODOR(datasets.GeneratorBasedBuilder):
                     "objects": objects,
                 }
                 idx += 1
+
+if __name__ == '__main__':
+    ds_builder = ODOR()
+    n_processes = min(1, multiprocessing.cpu_count()-1)
+
+    ds_builder.download_and_prepare()
+
+    ds = ds_builder.as_dataset()
+
+    print('ay')
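
Beyond the __main__ entry point above, a minimal sketch of consuming the refactored loader through the datasets library (the script path refers to the odor.py shown in this diff; example field names are not asserted here, since the builder's features block falls outside the hunks above):

    from datasets import load_dataset

    # Runs the ODOR builder defined in odor.py: _split_generators downloads
    # the images via download_all(), and _generate_examples yields one
    # example per annotated image file found in img_dir.
    ds = load_dataset('odor.py')
    print(ds)                      # shows the train/test splits
    print(ds['train'][0].keys())   # inspect the fields of one example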