Datasets:
Upload vndb_img.py
Browse files- vndb_img.py +212 -0
vndb_img.py
ADDED
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import datasets
|
3 |
+
import pandas as pd
|
4 |
+
import numpy as np
|
5 |
+
from tqdm import tqdm
|
6 |
+
import os
|
7 |
+
import glob
|
8 |
+
|
9 |
+
|
10 |
+
def read_header(path: str) -> list[str]:
    """Return the tab-separated column names stored in a VNDB ``.header`` file."""
    with open(path, 'r', encoding='utf-8') as header_file:
        raw = header_file.read()
    # A single line of tab-separated names, possibly newline-terminated.
    return raw.strip('\n\r').split('\t')
|
13 |
+
|
14 |
+
|
15 |
+
def read_table(root: str, name: str) -> pd.DataFrame:
    """Load one table from the VNDB database dump as a DataFrame.

    The dump stores each table as a headerless TSV file under ``<root>/db``,
    with its column names in a sibling ``<name>.header`` file.
    """
    table_path = os.path.join(root, 'db', name)
    columns = read_header(table_path + '.header')
    return pd.read_table(table_path, sep='\t', header=None, names=columns, encoding='utf-8')
|
23 |
+
|
24 |
+
|
25 |
+
def get_image_path(id: str):
    """Map a VNDB image id (e.g. ``'cv123'``) to its relative on-disk path.

    Files are bucketed by the numeric part modulo 100, zero-padded to two
    digits: ``<category>/<bucket>/<number>.jpg``.
    """
    category = id[:2]
    number = int(id[2:])
    bucket = f'{number % 100:02}'
    return os.path.join(category, bucket, f'{number}.jpg')
|
31 |
+
|
32 |
+
|
33 |
+
def get_class(avg: np.ndarray) -> np.ndarray:
    """Bucket average vote scores into classes 0 / 1 / 2.

    Thresholds (strictly greater than 40, strictly greater than 130) mirror
    the vndb.org source:
    https://code.blicky.net/yorhel/vndb/src/commit/cac41c8361194bd1d010357a946f056454231509/lib/VNWeb/Images/Lib.pm#L155
    """
    # Anything > 130 also satisfies > 40, so the two indicator arrays sum to
    # the class index; casting to avg.dtype keeps the input dtype, matching
    # a zeros_like-based implementation.
    return (avg > 40).astype(avg.dtype) + (avg > 130).astype(avg.dtype)
|
39 |
+
|
40 |
+
|
41 |
+
def load_image_df(db_root: str, img_root: str):
    """Build a DataFrame of image metadata from the VNDB dump.

    The dump only lists full-size screenshots ('sf' ids); a duplicated set
    of rows with 'st' (thumbnail) ids is appended so both variants get
    metadata. Adds integer type/class codes and on-disk path columns.
    """
    df = read_table(db_root, 'images')

    # Clone every screenshot row under its thumbnail id.
    screenshot_mask = df['id'].str.startswith('sf')
    thumbs = df.loc[screenshot_mask].copy()
    thumbs['id'] = 'st' + thumbs['id'].str[2:]
    df = pd.concat([df, thumbs], ignore_index=True)

    # Integer codes: indices into TYPE_NAMES.
    type_codes = {
        'ch': 0,
        'cv': 1,
        'sf': 2,
        'st': 3,
    }
    df['type'] = df['id'].map(lambda image_id: type_codes[image_id[:2]])

    df['sexual_class'] = get_class(df['c_sexual_avg'].values)
    df['violence_class'] = get_class(df['c_violence_avg'].values)

    df['file_name'] = df['id'].apply(get_image_path)
    df['full_path'] = df['file_name'].apply(lambda rel: os.path.join(img_root, rel))

    return df
|
63 |
+
|
64 |
+
|
65 |
+
def load_metadata(db_root: str, img_root: str) -> pd.DataFrame:
    """Load image metadata and reconcile it with the files on disk.

    WARNING: destructive — image files under ``img_root`` that have no
    metadata row are DELETED, and metadata rows whose file is missing are
    dropped from the returned frame.
    """
    db_root = os.path.abspath(os.path.expanduser(db_root))
    img_root = os.path.abspath(os.path.expanduser(img_root))

    print('Loading metadata')
    df = load_image_df(db_root, img_root)

    print('Scanning images')
    # Absolute paths on both sides so set membership compares like-for-like.
    on_disk = set(glob.glob(os.path.join(img_root, '*', '*', '*.jpg')))
    in_db = set(df['full_path'].values)

    print('Removing images without metadata')
    removed = 0
    for image_path in tqdm(on_disk):
        if image_path not in in_db:
            os.remove(image_path)
            removed += 1
    print(f'Removed {removed} images')

    print('Removing images without files')
    before = len(df)
    df.drop(df[~df['full_path'].isin(on_disk)].index, inplace=True)
    print(f'Removed {before - len(df)} images')

    print(f'{len(df)} left')

    return df
|
93 |
+
|
94 |
+
|
95 |
+
def load_vndb_img_df(db_root: str, img_root: str):
    """Return the final metadata frame, ready for the `datasets` builder.

    Materializes the old index as an 'index' column and duplicates
    'full_path' into 'image' so `datasets.Image()` can load the files.
    """
    frame = load_metadata(db_root, img_root)
    frame = frame.reset_index()
    frame['image'] = frame['full_path']
    return frame
|
100 |
+
|
101 |
+
|
102 |
+
# Human-readable ClassLabel names. List index must match the integer codes
# produced in load_image_df (image-id prefix -> type code) and get_class
# (0/1/2 buckets for the sexual/violence vote averages).
TYPE_NAMES = [
    'character',        # 0: 'ch' ids
    'cover',            # 1: 'cv' ids
    'screenshot_full',  # 2: 'sf' ids
    'screenshot_thumb', # 3: 'st' ids
]


SEXUAL_NAMES = [
    'safe',        # 0: avg <= 40
    'suggestive',  # 1: 40 < avg <= 130
    'explicit',    # 2: avg > 130
]


VIOLENCE_NAMES = [
    'tame',     # 0: avg <= 40
    'violent',  # 1: 40 < avg <= 130
    'brutal',   # 2: avg > 130
]
|
122 |
+
|
123 |
+
|
124 |
+
class VNDBIMG(datasets.GeneratorBasedBuilder):
    """`datasets` builder for vndb.org images with content-rating labels.

    Requires a manual download of the VNDB database dump and image
    collection (see `manual_download_instructions`). Produces a single
    train split where each example carries the image plus its metadata row.
    """

    @property
    def manual_download_instructions(self):
        # The tree diagrams use Unicode box-drawing characters; a previous
        # revision shipped them as mojibake ('βββ').
        return '''\
Please download the vndb.org database dump manually from https://vndb.org/d14.
Download the 'Near-complete database' vndb-db-latest.tar.zst file.
Use `rsync` to download the 'Images' collection.

Create the following directory structure:

```
my/dataset/path
├── db
│   └── vndb-db-latest.tar.zst
└── vndb-img  # this is the directory you downloaded with rsync
    ├── ch
    ├── cv
    ├── sf
    ├── st
    └── ...
```

Inside `my/dataset/path/db` run `zstd -d vndb-db-latest.tar.zst` and `tar -xf vndb-db-latest.tar`.

The final directory structure should look like this:

```
my/dataset/path
├── db
│   ├── vndb-db-latest.tar
│   ├── vndb-db-latest.tar.zst
│   └── db
│       └── ...
└── vndb-img
    ├── ch
    ├── cv
    ├── sf
    ├── st
    └── ...
```

Finally, use `datasets.load_dataset('carbon225/vndb_img', data_dir='my/dataset/path')`.
'''

    def _info(self):
        """Declare the feature schema: raw dump columns plus derived labels."""
        features = datasets.Features(
            {
                'index': datasets.Value('int32'),
                'id': datasets.Value('string'),
                'width': datasets.Value('int32'),
                'height': datasets.Value('int32'),
                'c_votecount': datasets.Value('int32'),
                'c_sexual_avg': datasets.Value('int32'),
                'c_sexual_stddev': datasets.Value('int32'),
                'c_violence_avg': datasets.Value('int32'),
                'c_violence_stddev': datasets.Value('int32'),
                'c_weight': datasets.Value('int32'),
                'type': datasets.ClassLabel(names=TYPE_NAMES),
                'sexual_class': datasets.ClassLabel(names=SEXUAL_NAMES),
                'violence_class': datasets.ClassLabel(names=VIOLENCE_NAMES),
                'file_name': datasets.Value('string'),
                'full_path': datasets.Value('string'),
                'image': datasets.Image(),
            }
        )
        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Validate the manually downloaded layout and emit one train split."""
        root_path = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        db_path = os.path.join(root_path, 'db')
        img_path = os.path.join(root_path, 'vndb-img')
        if not os.path.exists(db_path) or not os.path.exists(img_path):
            raise FileNotFoundError(f'Dataset not found at {root_path}. Please follow the manual download instructions.')
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'db_path': db_path,
                    'img_path': img_path,
                },
            ),
        ]

    def _generate_examples(self, db_path, img_path):
        """Yield (key, example) dicts; 'image' holds the absolute file path.

        NOTE: loading the metadata may delete on-disk images that have no
        metadata row (see load_metadata).
        """
        metadata = load_vndb_img_df(db_path, img_path)
        for i, row in metadata.iterrows():
            yield i, row.to_dict()
|