Create .huggingface/dataset_config.yaml
.huggingface/dataset_config.yaml
ADDED
@@ -0,0 +1,68 @@
+import pandas as pd
+import yaml
+from pathlib import Path
+
+# -------------------------------
+# Step 1: Create Sample CSV Files
+# -------------------------------
+
+# Define sample data for the train split
+train_data = {
+    "Timestamp": ["2025-01-01T12:00:00Z", "2025-01-02T13:00:00Z"],
+    "Source_IP": ["192.168.1.1", "10.0.0.2"],
+    "Destination_IP": ["192.168.1.100", "10.0.0.5"],
+    "MITRE_ATT&CK_ID": ["T1003", "T1021"],
+    "Tool_Name": ["ToolA", "ToolB"],
+    "Event_Description": ["Attack event description", "Defense event description"],
+    "Event_Type": ["Attack", "Defense"],
+    "MITRE_Tactic": ["Credential Access", "Defense Evasion"]
+}
+
+# Define sample data for the test split
+test_data = {
+    "Timestamp": ["2025-01-03T14:30:00Z"],
+    "Source_IP": ["172.16.0.3"],
+    "Destination_IP": ["172.16.0.10"],
+    "MITRE_ATT&CK_ID": ["T1059"],
+    "Tool_Name": ["ToolC"],
+    "Event_Description": ["Test event description"],
+    "Event_Type": ["Attack"],
+    "MITRE_Tactic": ["Execution"]
+}
+
+# Create pandas DataFrames for the train and test splits
+train_df = pd.DataFrame(train_data)
+test_df = pd.DataFrame(test_data)
+
+# Save the DataFrames as CSV files in the current directory
+train_csv_path = Path("train.csv")
+test_csv_path = Path("test.csv")
+
+train_df.to_csv(train_csv_path, index=False)
+test_df.to_csv(test_csv_path, index=False)
+
+print(f"Created CSV files: {train_csv_path} and {test_csv_path}")
+
+# ------------------------------------------
+# Step 2: Create a YAML Configuration File
+# ------------------------------------------
+# This YAML file tells the Hub how to treat your CSV files (which split each one belongs to)
+
+config = {
+    "configs": [
+        {
+            "config_name": "default",
+            "data_files": [
+                {"split": "train", "path": str(train_csv_path)},
+                {"split": "test", "path": str(test_csv_path)}
+            ]
+        }
+    ]
+}
+
+# Write the YAML configuration to a file named 'dataset_config.yaml'
+config_yaml_path = Path("dataset_config.yaml")
+with open(config_yaml_path, "w") as f:
+    yaml.dump(config, f, default_flow_style=False)
+
+print(f"Created YAML configuration file: {config_yaml_path}")
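Once train.csv, test.csv, and the split configuration are pushed to a dataset repository (the Hub documents this configs mapping as YAML front matter in the dataset's README.md, so it may need to be mirrored there for the splits to be picked up), the declared splits can be loaded with the datasets library. A minimal sketch, assuming a hypothetical repo id your-username/your-dataset:

from datasets import load_dataset

# Hypothetical repo id; replace with your own dataset repository on the Hub.
train_ds = load_dataset("your-username/your-dataset", split="train")
test_ds = load_dataset("your-username/your-dataset", split="test")

print(train_ds[0])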