parquet-converter committed
Commit edfc2ad · 1 Parent(s): 799c432

Update parquet files

Files changed (6)
  1. 1hr/ozone-train.parquet +3 -0
  2. 8hr/ozone-train.parquet +3 -0
  3. README.md +0 -32
  4. eighthr.data +0 -0
  5. onehr.data +0 -0
  6. ozone.py +0 -301
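With the loading script (`ozone.py`) and the raw `.data` files removed, the dataset is served directly from the parquet shards added in this commit. A minimal loading sketch, assuming the converter preserves the `1hr`/`8hr` configuration names and a single `train` split:

```python
from datasets import load_dataset

# Load the parquet-backed 8hr configuration; the config name is assumed to be
# unchanged by the conversion. Swap in "1hr" for the one-hour peak set.
dataset = load_dataset("mstz/ozone", "8hr", split="train")
print(dataset.features)
```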
1hr/ozone-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcf35c73c4793023510bb9cb18cc01985b41a4840f3fda2074b4809fbb7f8f91
+ size 313600
8hr/ozone-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26440d9751d76079eb3fba438d43d8d0ddac779e95a2db3f51b5e33a8ad4eb5c
+ size 313639
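The two entries above are Git LFS pointer files (`version`/`oid`/`size`); the parquet bytes themselves live in LFS storage. A sketch of fetching one shard and inspecting it directly, assuming `huggingface_hub` and a parquet engine such as `pyarrow` are installed:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer to the actual parquet file in the local cache.
path = hf_hub_download(
    repo_id="mstz/ozone",
    filename="1hr/ozone-train.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)
print(df.shape)                    # column count depends on how the converter handled the Date column
print(df["Class"].value_counts())  # assumes the label column kept the name "Class"
```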
README.md DELETED
@@ -1,32 +0,0 @@
- ---
- language:
- - en
- tags:
- - ozone
- - tabular_classification
- - binary_classification
- pretty_name: Ozone
- size_categories:
- - 1K<n<10K
- task_categories: # Full list at https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
- - tabular-classification
- configs:
- - 8hr
- - 1hr
- ---
- # Ozone
- The [Ozone dataset](https://archive.ics.uci.edu/ml/datasets/Ozone) from the [UCI ML repository](https://archive.ics.uci.edu/ml/datasets).
-
- # Configurations and tasks
- | **Configuration** | **Task**               | **Description**          |
- |-------------------|------------------------|--------------------------|
- | 8hr               | Binary classification  | Is the day an ozone day? |
- | 1hr               | Binary classification  | Is the day an ozone day? |
-
-
- # Usage
- ```python
- from datasets import load_dataset
-
- dataset = load_dataset("mstz/ozone", "8hr")["train"]
- ```

eighthr.data DELETED
The diff for this file is too large to render.
 
onehr.data DELETED
The diff for this file is too large to render.
 
ozone.py DELETED
@@ -1,301 +0,0 @@
- """Ozone: the UCI Ozone Level Detection dataset."""
-
- from typing import List
-
- import datasets
-
- import pandas
-
-
- VERSION = datasets.Version("1.0.0")
- _BASE_FEATURE_NAMES = [
-     "Date",
-     "WSR0",
-     "WSR1",
-     "WSR2",
-     "WSR3",
-     "WSR4",
-     "WSR5",
-     "WSR6",
-     "WSR7",
-     "WSR8",
-     "WSR9",
-     "WSR10",
-     "WSR11",
-     "WSR12",
-     "WSR13",
-     "WSR14",
-     "WSR15",
-     "WSR16",
-     "WSR17",
-     "WSR18",
-     "WSR19",
-     "WSR20",
-     "WSR21",
-     "WSR22",
-     "WSR23",
-     "WSR_PK",
-     "WSR_AV",
-     "T0",
-     "T1",
-     "T2",
-     "T3",
-     "T4",
-     "T5",
-     "T6",
-     "T7",
-     "T8",
-     "T9",
-     "T10",
-     "T11",
-     "T12",
-     "T13",
-     "T14",
-     "T15",
-     "T16",
-     "T17",
-     "T18",
-     "T19",
-     "T20",
-     "T21",
-     "T22",
-     "T23",
-     "T_PK",
-     "T_AV",
-     "T85",
-     "RH85",
-     "U85",
-     "V85",
-     "HT85",
-     "T70",
-     "RH70",
-     "U70",
-     "V70",
-     "HT70",
-     "T50",
-     "RH50",
-     "U50",
-     "V50",
-     "HT50",
-     "KI",
-     "TT",
-     "SLP",
-     "SLP_",
-     "Precp",
-     "Class"
- ]
-
- DESCRIPTION = "Ozone dataset from the UCI ML repository."
- _HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Ozone"
- _URLS = ("https://archive.ics.uci.edu/ml/datasets/Ozone")
- _CITATION = """
- @misc{misc_ozone_level_detection_172,
-     author       = {Zhang, Kun and Fan, Wei and Yuan, XiaoJing},
-     title        = {{Ozone Level Detection}},
-     year         = {2008},
-     howpublished = {UCI Machine Learning Repository},
-     note         = {{DOI}: \\url{10.24432/C5NG6W}}
- }"""
-
- # Dataset info
- urls_per_split = {
-     "8hr": {"train": "https://huggingface.co/datasets/mstz/ozone/raw/main/eighthr.data"},
-     "1hr": {"train": "https://huggingface.co/datasets/mstz/ozone/raw/main/onehr.data"},
- }
- features_types_per_config = {
-     "8hr": {
-         "WSR0": datasets.Value("float64"),
-         "WSR1": datasets.Value("float64"),
-         "WSR2": datasets.Value("float64"),
-         "WSR3": datasets.Value("float64"),
-         "WSR4": datasets.Value("float64"),
-         "WSR5": datasets.Value("float64"),
-         "WSR6": datasets.Value("float64"),
-         "WSR7": datasets.Value("float64"),
-         "WSR8": datasets.Value("float64"),
-         "WSR9": datasets.Value("float64"),
-         "WSR10": datasets.Value("float64"),
-         "WSR11": datasets.Value("float64"),
-         "WSR12": datasets.Value("float64"),
-         "WSR13": datasets.Value("float64"),
-         "WSR14": datasets.Value("float64"),
-         "WSR15": datasets.Value("float64"),
-         "WSR16": datasets.Value("float64"),
-         "WSR17": datasets.Value("float64"),
-         "WSR18": datasets.Value("float64"),
-         "WSR19": datasets.Value("float64"),
-         "WSR20": datasets.Value("float64"),
-         "WSR21": datasets.Value("float64"),
-         "WSR22": datasets.Value("float64"),
-         "WSR23": datasets.Value("float64"),
-         "WSR_PK": datasets.Value("float64"),
-         "WSR_AV": datasets.Value("float64"),
-         "T0": datasets.Value("float64"),
-         "T1": datasets.Value("float64"),
-         "T2": datasets.Value("float64"),
-         "T3": datasets.Value("float64"),
-         "T4": datasets.Value("float64"),
-         "T5": datasets.Value("float64"),
-         "T6": datasets.Value("float64"),
-         "T7": datasets.Value("float64"),
-         "T8": datasets.Value("float64"),
-         "T9": datasets.Value("float64"),
-         "T10": datasets.Value("float64"),
-         "T11": datasets.Value("float64"),
-         "T12": datasets.Value("float64"),
-         "T13": datasets.Value("float64"),
-         "T14": datasets.Value("float64"),
-         "T15": datasets.Value("float64"),
-         "T16": datasets.Value("float64"),
-         "T17": datasets.Value("float64"),
-         "T18": datasets.Value("float64"),
-         "T19": datasets.Value("float64"),
-         "T20": datasets.Value("float64"),
-         "T21": datasets.Value("float64"),
-         "T22": datasets.Value("float64"),
-         "T23": datasets.Value("float64"),
-         "T_PK": datasets.Value("float64"),
-         "T_AV": datasets.Value("float64"),
-         "T85": datasets.Value("float64"),
-         "RH85": datasets.Value("float64"),
-         "U85": datasets.Value("float64"),
-         "V85": datasets.Value("float64"),
-         "HT85": datasets.Value("float64"),
-         "T70": datasets.Value("float64"),
-         "RH70": datasets.Value("float64"),
-         "U70": datasets.Value("float64"),
-         "V70": datasets.Value("float64"),
-         "HT70": datasets.Value("float64"),
-         "T50": datasets.Value("float64"),
-         "RH50": datasets.Value("float64"),
-         "U50": datasets.Value("float64"),
-         "V50": datasets.Value("float64"),
-         "HT50": datasets.Value("float64"),
-         "KI": datasets.Value("float64"),
-         "TT": datasets.Value("float64"),
-         "SLP": datasets.Value("float64"),
-         "SLP_": datasets.Value("float64"),
-         "Precp": datasets.Value("float64"),
-         "Class": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
-     },
-     "1hr": {
-         "WSR0": datasets.Value("float64"),
-         "WSR1": datasets.Value("float64"),
-         "WSR2": datasets.Value("float64"),
-         "WSR3": datasets.Value("float64"),
-         "WSR4": datasets.Value("float64"),
-         "WSR5": datasets.Value("float64"),
-         "WSR6": datasets.Value("float64"),
-         "WSR7": datasets.Value("float64"),
-         "WSR8": datasets.Value("float64"),
-         "WSR9": datasets.Value("float64"),
-         "WSR10": datasets.Value("float64"),
-         "WSR11": datasets.Value("float64"),
-         "WSR12": datasets.Value("float64"),
-         "WSR13": datasets.Value("float64"),
-         "WSR14": datasets.Value("float64"),
-         "WSR15": datasets.Value("float64"),
-         "WSR16": datasets.Value("float64"),
-         "WSR17": datasets.Value("float64"),
-         "WSR18": datasets.Value("float64"),
-         "WSR19": datasets.Value("float64"),
-         "WSR20": datasets.Value("float64"),
-         "WSR21": datasets.Value("float64"),
-         "WSR22": datasets.Value("float64"),
-         "WSR23": datasets.Value("float64"),
-         "WSR_PK": datasets.Value("float64"),
-         "WSR_AV": datasets.Value("float64"),
-         "T0": datasets.Value("float64"),
-         "T1": datasets.Value("float64"),
-         "T2": datasets.Value("float64"),
-         "T3": datasets.Value("float64"),
-         "T4": datasets.Value("float64"),
-         "T5": datasets.Value("float64"),
-         "T6": datasets.Value("float64"),
-         "T7": datasets.Value("float64"),
-         "T8": datasets.Value("float64"),
-         "T9": datasets.Value("float64"),
-         "T10": datasets.Value("float64"),
-         "T11": datasets.Value("float64"),
-         "T12": datasets.Value("float64"),
-         "T13": datasets.Value("float64"),
-         "T14": datasets.Value("float64"),
-         "T15": datasets.Value("float64"),
-         "T16": datasets.Value("float64"),
-         "T17": datasets.Value("float64"),
-         "T18": datasets.Value("float64"),
-         "T19": datasets.Value("float64"),
-         "T20": datasets.Value("float64"),
-         "T21": datasets.Value("float64"),
-         "T22": datasets.Value("float64"),
-         "T23": datasets.Value("float64"),
-         "T_PK": datasets.Value("float64"),
-         "T_AV": datasets.Value("float64"),
-         "T85": datasets.Value("float64"),
-         "RH85": datasets.Value("float64"),
-         "U85": datasets.Value("float64"),
-         "V85": datasets.Value("float64"),
-         "HT85": datasets.Value("float64"),
-         "T70": datasets.Value("float64"),
-         "RH70": datasets.Value("float64"),
-         "U70": datasets.Value("float64"),
-         "V70": datasets.Value("float64"),
-         "HT70": datasets.Value("float64"),
-         "T50": datasets.Value("float64"),
-         "RH50": datasets.Value("float64"),
-         "U50": datasets.Value("float64"),
-         "V50": datasets.Value("float64"),
-         "HT50": datasets.Value("float64"),
-         "KI": datasets.Value("float64"),
-         "TT": datasets.Value("float64"),
-         "SLP": datasets.Value("float64"),
-         "SLP_": datasets.Value("float64"),
-         "Precp": datasets.Value("float64"),
-         "Class": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
-     },
-
- }
- features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
-
-
- class OzoneConfig(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         super(OzoneConfig, self).__init__(version=VERSION, **kwargs)
-         self.features = features_per_config[kwargs["name"]]
-
-
- class Ozone(datasets.GeneratorBasedBuilder):
-     # dataset versions
-     DEFAULT_CONFIG = "8hr"
-     BUILDER_CONFIGS = [
-         OzoneConfig(name="8hr",
-                     description="Ozone for binary classification."),
-         OzoneConfig(name="1hr",
-                     description="Ozone for binary classification.")
-     ]
-
-
-     def _info(self):
-         info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
-                                     features=features_per_config[self.config.name])
-
-         return info
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         downloads = dl_manager.download_and_extract(urls_per_split)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads[self.config.name]["train"]})
-         ]
-
-     def _generate_examples(self, filepath: str):
-         data = pandas.read_csv(filepath)
-         data.drop("Date", axis="columns", inplace=True)
-         data.loc[:, "Class"] = data.Class.astype(int)
-         data = data[~(data.isin(["?"]).any(axis=1))]
-         data = data.infer_objects()
-
-         for row_id, row in data.iterrows():
-             data_row = dict(row)
-
-             yield row_id, data_row
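For reference, the cleaning that the deleted `_generate_examples` applied can be reproduced without the builder script. A standalone sketch, assuming a local copy of the raw UCI `onehr.data` file with no header row (the Hub copy removed in this commit may have carried one):

```python
import pandas as pd

# Column names used by the deleted ozone.py script: Date, 72 features, Class.
names = (
    ["Date"]
    + [f"WSR{i}" for i in range(24)] + ["WSR_PK", "WSR_AV"]
    + [f"T{i}" for i in range(24)] + ["T_PK", "T_AV"]
    + ["T85", "RH85", "U85", "V85", "HT85",
       "T70", "RH70", "U70", "V70", "HT70",
       "T50", "RH50", "U50", "V50", "HT50",
       "KI", "TT", "SLP", "SLP_", "Precp", "Class"]
)

# Assumed local path; the header handling is a guess about the raw UCI format.
data = pd.read_csv("onehr.data", header=None, names=names)

# Mirror the script's cleaning: drop the date, remove rows containing "?",
# coerce the remaining columns to numbers, and cast the label to int.
data = data.drop(columns=["Date"])
data = data[~data.isin(["?"]).any(axis=1)]
data = data.apply(pd.to_numeric)
data["Class"] = data["Class"].astype(int)
```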