Canstralian committed
Commit c2257a6 · verified · 1 Parent(s): 39c5e50

Upload 17 files

.gitignore ADDED
@@ -0,0 +1,171 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # PyPI configuration file
+ .pypirc
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Esteban Cara de Sexo
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,3 +1,122 @@
- ---
- license: mit
- ---
+ # Purple Team Cybersecurity Dataset
+
+ <!-- Hugging Face Metadata -->
+ <!-- language: other -->
+ <!-- license: mit -->
+ <!-- datasets: canstralian/Purple-Team-Cybersecurity-Dataset -->
+
+ <!-- Badges -->
+ [![Build Status](https://img.shields.io/github/actions/workflow/status/canstralian/Purple-Team-Cybersecurity-Dataset/ci.yml)](https://github.com/canstralian/Purple-Team-Cybersecurity-Dataset/actions)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+ ## Dataset Card for Purple Team Cybersecurity Dataset
+
+ **Dataset Summary**
+
+ The Purple Team Cybersecurity Dataset is a comprehensive collection of events that combines Red Team (attack) and Blue Team (defense) data. It is designed to facilitate research and development in cybersecurity, particularly in understanding and analyzing the interplay between offensive and defensive strategies. It includes standardized fields such as timestamps, IP addresses, MITRE ATT&CK IDs, tool names, and event descriptions, providing a balanced perspective on both attack and defense scenarios.
+
+ ## Dataset Details
+
+ ### Dataset Description
+
+ This dataset was curated by combining existing Red Team and Blue Team datasets, then normalizing and standardizing the data to ensure consistency. Each event is labeled as either “Attack” or “Defense” and is mapped to the corresponding MITRE ATT&CK tactic. The dataset aims to support cybersecurity research by providing a balanced and comprehensive view of both offensive and defensive events.
+
+ - Curated by: [Your Name or Organization]
+ - Funded by [optional]: [More Information Needed]
+ - Shared by [optional]: [More Information Needed]
+ - Language(s) (NLP): Not Applicable
+ - License: MIT License (see the LICENSE file in this repository)
+
+ ### Dataset Sources
+
+ - Repository: [Link to Dataset Repository]
+ - Paper [optional]: [Link to Related Paper]
+ - Demo [optional]: [Link to Demo]
+
+ ## Uses
+
+ ### Direct Use
+
+ This dataset is intended for use in cybersecurity research, including but not limited to the following (a usage sketch follows the list):
+
+ - Analyzing attack and defense patterns
+ - Developing and testing intrusion detection systems
+ - Training machine learning models for threat detection
+
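+ A minimal usage sketch with the helper scripts shipped alongside this card (it assumes `scripts/` is importable and the sample CSV lives under `data/`; column names follow the schema described under Dataset Structure):
+
+ ```python
+ import pandas as pd
+
+ from scripts.preprocessing import load_data
+ from scripts.utility import split_data
+
+ # Load the sample CSV and derive a binary label from Event_Type.
+ df = load_data("data/purple_team_cybersecurity_dataset.csv")
+ df["label"] = (df["Event_Type"] == "Attack").astype(int)
+
+ # One-hot encode two categorical fields as illustrative features.
+ features = pd.get_dummies(df[["Tool_Name", "MITRE_Tactic"]])
+ frame = pd.concat([features, df["label"]], axis=1)
+
+ # Split for model development; scripts.modeling.train_model can then be
+ # applied to a larger export of the dataset.
+ X_train, X_test, y_train, y_test = split_data(frame, target_column="label")
+ ```
+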
+ ### Out-of-Scope Use
+
+ The dataset is not suitable for:
+
+ - Real-time threat monitoring in production environments
+ - Any application involving sensitive or personally identifiable information
+
+ ## Dataset Structure
+
+ The dataset comprises events with the following fields (a loading sketch follows the list):
+
+ - Timestamp: ISO 8601 formatted date and time
+ - Source_IP: Validated source IP address
+ - Destination_IP: Validated destination IP address
+ - MITRE_ATT&CK_ID: Standardized MITRE ATT&CK technique ID
+ - Tool_Name: Standardized name of the tool used
+ - Event_Description: Detailed description of the event
+ - Event_Type: Label indicating “Attack” or “Defense”
+ - MITRE_Tactic: Mapped MITRE ATT&CK tactic
+
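+ For orientation, a short sketch of reading the two committed data files with pandas (both share the schema above; the paths match this repository's `data/` directory):
+
+ ```python
+ import pandas as pd
+
+ csv_events = pd.read_csv("data/purple_team_cybersecurity_dataset.csv")
+ json_events = pd.read_json("data/purple_team_cybersecurity_dataset.json")
+
+ # Both files share the same columns, so they can be concatenated directly.
+ events = pd.concat([csv_events, json_events], ignore_index=True)
+ events["Timestamp"] = pd.to_datetime(events["Timestamp"])
+
+ # Tactic coverage per event type.
+ print(events.groupby(["Event_Type", "MITRE_Tactic"]).size())
+ ```
+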
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ The dataset was created to provide a balanced and comprehensive resource for cybersecurity professionals and researchers, enabling the study of both attack and defense mechanisms within a unified framework.
+
+ ### Source Data
+
+ #### Data Collection and Processing
+
+ Data was collected from existing Red Team and Blue Team datasets, followed by normalization and standardization processes to ensure consistency across fields. Events were labeled and mapped to MITRE ATT&CK tactics to facilitate structured analysis.
+
+ #### Who are the source data producers?
+
+ The original data was produced by various cybersecurity teams and organizations engaged in offensive and defensive security operations.
+
+ ### Annotations
+
+ #### Annotation process
+
+ Events were labeled as “Attack” or “Defense” based on their origin and nature. Each event was mapped to the corresponding MITRE ATT&CK tactic using a predefined mapping.
+
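+ The mapping itself is not part of this upload; the sketch below is a hypothetical excerpt of the kind of lookup described here, with tactic labels taken from the sample rows under `data/`:
+
+ ```python
+ # Hypothetical technique-to-tactic lookup; the actual mapping used for
+ # annotation is not included in this commit.
+ TECHNIQUE_TO_TACTIC = {
+     "T1078": "Credential Access",     # as labeled in the CSV sample
+     "T1134": "Privilege Escalation",  # as labeled in the JSON sample
+     "T1486": "Impact",                # as labeled in the JSON sample
+ }
+
+ def map_tactic(attack_id: str) -> str:
+     """Return the mapped MITRE ATT&CK tactic, or 'Unknown' if unmapped."""
+     return TECHNIQUE_TO_TACTIC.get(attack_id, "Unknown")
+ ```
+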
+ #### Who are the annotators?
+
+ The annotation was performed by cybersecurity experts with experience in both offensive and defensive operations.
+
+ ### Personal and Sensitive Information
+
+ The dataset does not contain any personal or sensitive information. All IP addresses have been validated to ensure they do not correspond to real-world entities.
+
+ ## Bias, Risks, and Limitations
+
+ While efforts have been made to balance the dataset, there may still be inherent biases due to the nature of the source data. Users should be aware of these potential biases and exercise caution when applying the dataset to different contexts.
+
+ ### Recommendations
+
+ Users should be made aware of the risks, biases, and limitations of the dataset. More information is needed for further recommendations.
+
+ ## Citation
+
+ BibTeX:
+
+ [More Information Needed]
+
+ APA:
+
+ [More Information Needed]
+
+ ## Glossary
+
+ - MITRE ATT&CK: A globally accessible knowledge base of adversary tactics and techniques based on real-world observations.
+
+ ## More Information
+
+ [More Information Needed]
+
+ ## Dataset Card Authors
+
+ [Your Name or Organization]
+
+ ## Dataset Card Contact
+
+ [Your Contact Information]
data/purple_team_cybersecurity_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ Timestamp,Source_IP,Destination_IP,MITRE_ATT&CK_ID,Tool_Name,Event_Description,Event_Type,MITRE_Tactic
+ 2025-02-03T10:15:30Z,192.168.1.10,10.0.0.5,T1078,Mimikatz,"Unauthorized access attempt using stolen credentials.",Attack,Credential Access
+ 2025-02-03T10:20:45Z,10.0.0.5,192.168.1.10,D3FEND-D3-DA0001,Sysmon,"Detection of unauthorized access attempt; alert generated.",Defense,Credential Access
data/purple_team_cybersecurity_dataset.json ADDED
@@ -0,0 +1,102 @@
+ [
+   {
+     "Timestamp": "2025-02-03T11:05:00Z",
+     "Source_IP": "192.168.1.40",
+     "Destination_IP": "192.168.1.45",
+     "MITRE_ATT&CK_ID": "T1134",
+     "Tool_Name": "Token Impersonation Tool",
+     "Event_Description": "Impersonation of a token to escalate privileges within the system.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Privilege Escalation"
+   },
+   {
+     "Timestamp": "2025-02-03T11:10:15Z",
+     "Source_IP": "10.0.0.50",
+     "Destination_IP": "10.0.0.55",
+     "MITRE_ATT&CK_ID": "T1203",
+     "Tool_Name": "Exploit Kit",
+     "Event_Description": "Exploitation of a vulnerability in a web browser to execute arbitrary code.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Execution"
+   },
+   {
+     "Timestamp": "2025-02-03T11:15:30Z",
+     "Source_IP": "172.16.0.30",
+     "Destination_IP": "172.16.0.35",
+     "MITRE_ATT&CK_ID": "T1499",
+     "Tool_Name": "DDoS Tool",
+     "Event_Description": "Distributed Denial of Service attack causing disruption of services.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Impact"
+   },
+   {
+     "Timestamp": "2025-02-03T11:20:45Z",
+     "Source_IP": "192.168.1.60",
+     "Destination_IP": "192.168.1.65",
+     "MITRE_ATT&CK_ID": "T1082",
+     "Tool_Name": "System Profiler",
+     "Event_Description": "Discovery of system information to identify potential targets.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Discovery"
+   },
+   {
+     "Timestamp": "2025-02-03T11:25:00Z",
+     "Source_IP": "10.0.0.60",
+     "Destination_IP": "10.0.0.65",
+     "MITRE_ATT&CK_ID": "T1046",
+     "Tool_Name": "Network Scanner",
+     "Event_Description": "Scanning of network to identify active hosts and open ports.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Discovery"
+   },
+   {
+     "Timestamp": "2025-02-03T11:30:15Z",
+     "Source_IP": "172.16.0.40",
+     "Destination_IP": "172.16.0.45",
+     "MITRE_ATT&CK_ID": "T1112",
+     "Tool_Name": "Registry Editor",
+     "Event_Description": "Modification of Windows Registry keys to establish persistence.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Persistence"
+   },
+   {
+     "Timestamp": "2025-02-03T11:35:30Z",
+     "Source_IP": "192.168.1.70",
+     "Destination_IP": "192.168.1.75",
+     "MITRE_ATT&CK_ID": "T1053",
+     "Tool_Name": "Scheduled Task",
+     "Event_Description": "Creation of a scheduled task to execute malicious code at a later time.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Execution"
+   },
+   {
+     "Timestamp": "2025-02-03T11:40:45Z",
+     "Source_IP": "10.0.0.70",
+     "Destination_IP": "10.0.0.75",
+     "MITRE_ATT&CK_ID": "T1070",
+     "Tool_Name": "Log Cleaner",
+     "Event_Description": "Deletion of event logs to cover tracks and avoid detection.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Defense Evasion"
+   },
+   {
+     "Timestamp": "2025-02-03T11:45:00Z",
+     "Source_IP": "172.16.0.50",
+     "Destination_IP": "172.16.0.55",
+     "MITRE_ATT&CK_ID": "T1090",
+     "Tool_Name": "Proxy Tool",
+     "Event_Description": "Use of a proxy to obscure the origin of malicious traffic.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Command and Control"
+   },
+   {
+     "Timestamp": "2025-02-03T11:50:15Z",
+     "Source_IP": "192.168.1.80",
+     "Destination_IP": "192.168.1.85",
+     "MITRE_ATT&CK_ID": "T1486",
+     "Tool_Name": "Ransomware",
+     "Event_Description": "Encryption of files on disk to extort payment from the victim.",
+     "Event_Type": "Attack",
+     "MITRE_Tactic": "Impact"
+   }
+ ]
scripts/analysis.py ADDED
@@ -0,0 +1,49 @@
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import pandas as pd
+ import seaborn as sns
+
+ # Plot a heatmap of correlations between features
+ def plot_correlation_heatmap(df: pd.DataFrame) -> None:
+     """
+     Plots a heatmap showing the correlations between numeric features in the dataset.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     """
+     correlation_matrix = df.corr(numeric_only=True)
+     plt.figure(figsize=(10, 8))
+     sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm", fmt='.2f', linewidths=0.5)
+     plt.title("Correlation Heatmap")
+     plt.show()
+
+ # Plot feature distribution for each numeric feature
+ def plot_feature_distributions(df: pd.DataFrame) -> None:
+     """
+     Plots the distribution of each numeric feature in the dataset.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     """
+     numeric_columns = df.select_dtypes(include=[np.number]).columns
+     df[numeric_columns].hist(figsize=(12, 10), bins=30, edgecolor='black')
+     plt.suptitle("Feature Distributions")
+     plt.show()
+
+ # Feature importance based on a model (Random Forest example)
+ def plot_feature_importance(model, X_train: pd.DataFrame) -> None:
+     """
+     Plots the feature importance based on the trained model.
+
+     Args:
+     - model: The trained model (Random Forest).
+     - X_train (pd.DataFrame): The training feature data.
+     """
+     feature_importances = model.feature_importances_
+     feature_names = X_train.columns
+     sorted_idx = feature_importances.argsort()
+
+     plt.figure(figsize=(10, 6))
+     plt.barh(feature_names[sorted_idx], feature_importances[sorted_idx])
+     plt.title("Feature Importance")
+     plt.xlabel("Importance")
+     plt.show()
scripts/augmentation.py ADDED
@@ -0,0 +1,53 @@
+ import numpy as np
+ import pandas as pd
+ from sklearn.preprocessing import PolynomialFeatures
+ from sklearn.utils import resample
+
+ # Add polynomial features for data augmentation
+ def add_polynomial_features(df: pd.DataFrame, degree: int = 2) -> pd.DataFrame:
+     """
+     Adds polynomial features to the dataset.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - degree (int): The degree of the polynomial features.
+
+     Returns:
+     - pd.DataFrame: The augmented dataset with polynomial features.
+     """
+     numeric_df = df.select_dtypes(include=np.number)
+     poly = PolynomialFeatures(degree)
+     poly_features = poly.fit_transform(numeric_df)
+     poly_feature_names = poly.get_feature_names_out(numeric_df.columns)
+
+     # Combine polynomial features with the original dataset
+     poly_df = pd.DataFrame(poly_features, columns=poly_feature_names, index=df.index)
+     df_augmented = pd.concat([df, poly_df], axis=1)
+
+     return df_augmented
+
+ # Synthetic oversampling using bootstrap sampling (Resampling)
+ def oversample_data(df: pd.DataFrame, target_column: str) -> pd.DataFrame:
+     """
+     Performs oversampling to balance the dataset using bootstrapping.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - target_column (str): The target column to balance.
+
+     Returns:
+     - pd.DataFrame: The resampled dataset.
+     """
+     # Separate majority and minority classes
+     majority_class = df[df[target_column] == df[target_column].mode()[0]]
+     minority_class = df[df[target_column] != df[target_column].mode()[0]]
+
+     # Resample minority class
+     minority_resampled = resample(minority_class,
+                                   replace=True,  # Allow sampling of the same row more than once
+                                   n_samples=majority_class.shape[0],  # Equalize the number of samples
+                                   random_state=42)
+
+     # Combine majority and minority
+     df_resampled = pd.concat([majority_class, minority_resampled])
+
+     return df_resampled
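+
+ # Usage sketch (illustrative): balance the Event_Type label of the sample data,
+ # assuming a DataFrame loaded from data/purple_team_cybersecurity_dataset.csv.
+ # df = pd.read_csv("data/purple_team_cybersecurity_dataset.csv")
+ # df_balanced = oversample_data(df, target_column="Event_Type")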
scripts/config.py ADDED
@@ -0,0 +1,21 @@
+ # Configuration file for model parameters and preprocessing settings
+
+ class Config:
+     # Data Preprocessing Settings
+     missing_value_strategy = "mean"  # Options: 'mean', 'median', 'drop'
+
+     # Model Settings
+     model_type = "random_forest"  # Options: 'logistic_regression', 'random_forest'
+     random_forest_n_estimators = 100
+
+     # Augmentation Settings
+     noise_level = 0.01  # For noise augmentation
+     polynomial_degree = 2  # For polynomial feature augmentation
+
+     # Data Sampling Settings
+     target_column = "target"  # Name of the target column for resampling
+     oversample = True  # Whether to apply oversampling
+
+ # Example of using the config:
+ # config = Config()
+ # print(config.model_type)
scripts/deployment.py ADDED
@@ -0,0 +1,36 @@
+ import os
+
+ import joblib
+
+ # Save model to disk
+ def save_model(model, model_name: str) -> None:
+     """
+     Saves the trained model to a file for deployment.
+
+     Args:
+     - model: The trained machine learning model.
+     - model_name (str): The name to use for the saved model file.
+     """
+     os.makedirs('models', exist_ok=True)  # Ensure the target directory exists
+     model_path = os.path.join('models', f'{model_name}.pkl')
+     joblib.dump(model, model_path)
+     print(f"Model saved to {model_path}")
+
+ # Load model from disk
+ def load_model(model_name: str):
+     """
+     Loads a pre-trained model from disk.
+
+     Args:
+     - model_name (str): The name of the model file.
+
+     Returns:
+     - model: The loaded model.
+     """
+     model_path = os.path.join('models', f'{model_name}.pkl')
+     if os.path.exists(model_path):
+         model = joblib.load(model_path)
+         print(f"Model loaded from {model_path}")
+         return model
+     else:
+         print(f"Model {model_name} not found.")
+         return None
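+
+ # Usage sketch (illustrative): persist and reload a fitted estimator, e.g. one
+ # returned by train_model in scripts/modeling.py; the file name is arbitrary.
+ # save_model(model, "purple_team_rf")
+ # reloaded = load_model("purple_team_rf")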
scripts/documentation.py ADDED
@@ -0,0 +1,37 @@
+ import os
+
+ # Generate README file with dataset description
+ def generate_readme(dataset_name: str, description: str, columns: list) -> None:
+     """
+     Generates a README file for the dataset, including a description and column details.
+
+     Args:
+     - dataset_name (str): The name of the dataset.
+     - description (str): Description of the dataset.
+     - columns (list): List of (column, description) tuples.
+     """
+     readme_content = f"# {dataset_name}\n\n"
+     readme_content += f"## Description\n{description}\n\n"
+     readme_content += "## Columns\n"
+
+     for column, col_description in columns:
+         readme_content += f"- {column}: {col_description}\n"
+
+     # Save README.md file (create the dataset directory if needed)
+     os.makedirs(dataset_name, exist_ok=True)
+     with open(f"{dataset_name}/README.md", "w") as f:
+         f.write(readme_content)
+
+     print(f"README generated for {dataset_name}")
+
+ # Create a script for generating dataset-specific documentation
+ def generate_dataset_docs(df, dataset_name: str) -> None:
+     """
+     Generates a dataset documentation file with basic info such as column types.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - dataset_name (str): The name of the dataset.
+     """
+     columns_info = [(col, df[col].dtype) for col in df.columns]
+
+     generate_readme(dataset_name, "A dataset for modeling purposes.", columns_info)
scripts/modeling.py ADDED
@@ -0,0 +1,64 @@
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.ensemble import RandomForestClassifier
+ from sklearn.metrics import accuracy_score, confusion_matrix
+ from sklearn.model_selection import train_test_split
+ import pandas as pd
+
+ # Train a classifier model
+ def train_model(df: pd.DataFrame, target_column: str, model_type: str = "logistic_regression"):
+     """
+     Trains a model on the dataset using the specified model type.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - target_column (str): The target column for prediction.
+     - model_type (str): Type of model ('logistic_regression' or 'random_forest').
+
+     Returns:
+     - model: The trained model.
+     """
+     X = df.drop(columns=[target_column])
+     y = df[target_column]
+
+     # Split data into training and testing sets
+     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+     if model_type == "logistic_regression":
+         model = LogisticRegression()
+     elif model_type == "random_forest":
+         model = RandomForestClassifier(n_estimators=100)
+     else:
+         raise ValueError(f"Unsupported model type: {model_type}")
+
+     # Train the model
+     model.fit(X_train, y_train)
+
+     # Predict and evaluate model
+     y_pred = model.predict(X_test)
+     accuracy = accuracy_score(y_test, y_pred)
+     cm = confusion_matrix(y_test, y_pred)
+
+     print(f"Model Accuracy: {accuracy}")
+     print(f"Confusion Matrix:\n{cm}")
+
+     return model
+
+ # Model evaluation with custom metrics (e.g., precision, recall, F1-score)
+ def evaluate_model(model, X_test: pd.DataFrame, y_test: pd.Series):
+     """
+     Evaluates a trained model using custom metrics.
+
+     Args:
+     - model: The trained model.
+     - X_test (pd.DataFrame): The test feature data.
+     - y_test (pd.Series): The true labels.
+
+     Returns:
+     - dict: Dictionary containing custom evaluation metrics.
+     """
+     from sklearn.metrics import classification_report
+
+     y_pred = model.predict(X_test)
+     report = classification_report(y_test, y_pred, output_dict=True)
+
+     return report
scripts/preprocessing.py ADDED
@@ -0,0 +1,59 @@
+ import pandas as pd
+ import numpy as np
+
+ # Load data
+ def load_data(file_path: str) -> pd.DataFrame:
+     """
+     Loads the dataset from a CSV file.
+
+     Args:
+     - file_path (str): Path to the dataset file.
+
+     Returns:
+     - pd.DataFrame: Loaded dataset.
+     """
+     return pd.read_csv(file_path)
+
+ # Clean data (e.g., handle missing values, remove duplicates)
+ def clean_data(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     Cleans the dataset by removing duplicates and handling missing values.
+
+     Args:
+     - df (pd.DataFrame): The raw dataset.
+
+     Returns:
+     - pd.DataFrame: Cleaned dataset.
+     """
+     df = df.drop_duplicates()
+     df = df.fillna(df.mean(numeric_only=True))  # Simple approach: fill missing numeric values with the column mean
+     return df
+
+ # Normalize data (e.g., standard scaling)
+ def normalize_data(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     Normalizes the numeric columns of the dataset using standard scaling (z-score).
+
+     Args:
+     - df (pd.DataFrame): The cleaned dataset.
+
+     Returns:
+     - pd.DataFrame: Normalized dataset.
+     """
+     df = df.copy()
+     numeric_columns = df.select_dtypes(include=np.number).columns
+     df[numeric_columns] = (df[numeric_columns] - df[numeric_columns].mean()) / df[numeric_columns].std()
+     return df
+
+ # Main function for preprocessing
+ def preprocess_data(file_path: str) -> pd.DataFrame:
+     """
+     Preprocesses the dataset from file by loading, cleaning, and normalizing it.
+
+     Args:
+     - file_path (str): Path to the dataset file.
+
+     Returns:
+     - pd.DataFrame: The preprocessed dataset.
+     """
+     df = load_data(file_path)
+     df = clean_data(df)
+     df = normalize_data(df)
+     return df
scripts/testing.py ADDED
@@ -0,0 +1,37 @@
+ import os
+
+ # Generate README file with dataset description
+ def generate_readme(dataset_name: str, description: str, columns: list) -> None:
+     """
+     Generates a README file for the dataset, including a description and column details.
+
+     Args:
+     - dataset_name (str): The name of the dataset.
+     - description (str): Description of the dataset.
+     - columns (list): List of (column, description) tuples.
+     """
+     readme_content = f"# {dataset_name}\n\n"
+     readme_content += f"## Description\n{description}\n\n"
+     readme_content += "## Columns\n"
+
+     for column, col_description in columns:
+         readme_content += f"- {column}: {col_description}\n"
+
+     # Save README.md file (create the dataset directory if needed)
+     os.makedirs(dataset_name, exist_ok=True)
+     with open(f"{dataset_name}/README.md", "w") as f:
+         f.write(readme_content)
+
+     print(f"README generated for {dataset_name}")
+
+ # Create a script for generating dataset-specific documentation
+ def generate_dataset_docs(df, dataset_name: str) -> None:
+     """
+     Generates a dataset documentation file with basic info such as column types.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - dataset_name (str): The name of the dataset.
+     """
+     columns_info = [(col, df[col].dtype) for col in df.columns]
+
+     generate_readme(dataset_name, "A dataset for modeling purposes.", columns_info)
scripts/transformation.py ADDED
@@ -0,0 +1,36 @@
+ from sklearn.preprocessing import StandardScaler, LabelEncoder
+ import pandas as pd
+
+ # Standardize features (e.g., scaling numerical values)
+ def standardize_features(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     Standardizes the numerical features of the dataset to have zero mean and unit variance.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+
+     Returns:
+     - pd.DataFrame: The dataset with standardized features.
+     """
+     scaler = StandardScaler()
+     numeric_columns = df.select_dtypes(include=['float64', 'int64']).columns
+     df[numeric_columns] = scaler.fit_transform(df[numeric_columns])
+
+     return df
+
+ # Label Encoding for categorical variables
+ def encode_labels(df: pd.DataFrame, target_column: str) -> pd.DataFrame:
+     """
+     Encodes categorical variables into numerical labels.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - target_column (str): The column to encode.
+
+     Returns:
+     - pd.DataFrame: The dataset with encoded labels for the target column.
+     """
+     label_encoder = LabelEncoder()
+     df[target_column] = label_encoder.fit_transform(df[target_column])
+
+     return df
scripts/utility.py ADDED
@@ -0,0 +1,68 @@
+ import os
+ import logging
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+
+ # Setup logging for debugging and tracking
+ def setup_logging(log_file: str = 'data_pipeline.log'):
+     """
+     Sets up logging for the pipeline to track progress and debug.
+
+     Args:
+     - log_file (str): Path to the log file.
+     """
+     logging.basicConfig(filename=log_file,
+                         level=logging.INFO,
+                         format='%(asctime)s - %(levelname)s - %(message)s')
+     logging.info("Logging setup complete.")
+
+ # Split dataset into training and testing sets
+ def split_data(df: pd.DataFrame, target_column: str, test_size: float = 0.2):
+     """
+     Splits the dataset into training and testing sets.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     - target_column (str): The column to predict.
+     - test_size (float): The proportion of data to use for testing.
+
+     Returns:
+     - tuple: X_train, X_test, y_train, y_test.
+     """
+     X = df.drop(columns=[target_column])
+     y = df[target_column]
+
+     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
+
+     return X_train, X_test, y_train, y_test
+
+ # Save DataFrame to CSV
+ def save_dataframe_to_csv(df: pd.DataFrame, file_path: str):
+     """
+     Saves the DataFrame to a CSV file.
+
+     Args:
+     - df (pd.DataFrame): The dataset to save.
+     - file_path (str): Path where the CSV will be saved.
+     """
+     df.to_csv(file_path, index=False)
+     logging.info(f"Data saved to {file_path}")
+
+ # Load DataFrame from CSV
+ def load_dataframe_from_csv(file_path: str) -> pd.DataFrame:
+     """
+     Loads a CSV file into a DataFrame.
+
+     Args:
+     - file_path (str): Path to the CSV file.
+
+     Returns:
+     - pd.DataFrame: Loaded dataset.
+     """
+     if os.path.exists(file_path):
+         df = pd.read_csv(file_path)
+         logging.info(f"Data loaded from {file_path}")
+         return df
+     else:
+         logging.error(f"{file_path} does not exist.")
+         return pd.DataFrame()
scripts/utils.py ADDED
@@ -0,0 +1,37 @@
+ import os
+
+ import pandas as pd
+
+ # Check if a file exists
+ def file_exists(file_path: str) -> bool:
+     """
+     Checks if a file exists at the given path.
+
+     Args:
+     - file_path (str): Path to the file.
+
+     Returns:
+     - bool: True if file exists, False otherwise.
+     """
+     return os.path.isfile(file_path)
+
+ # Save DataFrame to CSV
+ def save_to_csv(df: pd.DataFrame, file_path: str) -> None:
+     """
+     Saves a DataFrame to a CSV file.
+
+     Args:
+     - df (pd.DataFrame): The DataFrame to save.
+     - file_path (str): The path to save the file.
+     """
+     df.to_csv(file_path, index=False)
+
+ # Display basic info about the dataset (e.g., shape, column names)
+ def dataset_info(df: pd.DataFrame) -> None:
+     """
+     Displays basic information about the dataset.
+
+     Args:
+     - df (pd.DataFrame): The dataset.
+     """
+     print(f"Shape of dataset: {df.shape}")
+     print(f"Columns: {df.columns}")
+     print(f"First few rows:\n{df.head()}")
tests/test_preprocessing.py ADDED
@@ -0,0 +1,37 @@
+ import unittest
+
+ import numpy as np
+ import pandas as pd
+
+ from scripts.preprocessing import load_data, clean_data, normalize_data
+
+ class TestPreprocessing(unittest.TestCase):
+
+     def setUp(self):
+         # Create a simple dataset for testing
+         data = {
+             'A': [1, 2, 3, 4, np.nan],
+             'B': [5, 6, 7, 8, 9],
+             'C': [10, 11, 12, 13, 14]
+         }
+         self.df = pd.DataFrame(data)
+
+     def test_load_data(self):
+         # Test that the load_data function works correctly
+         file_path = 'sample_data.csv'
+         self.df.to_csv(file_path, index=False)  # Save test data to file
+         loaded_df = load_data(file_path)
+         self.assertEqual(loaded_df.shape, self.df.shape)
+
+     def test_clean_data(self):
+         # Test the clean_data function
+         cleaned_df = clean_data(self.df)
+         # After cleaning, there should be no NaN values
+         self.assertFalse(cleaned_df.isnull().any().any())
+
+     def test_normalize_data(self):
+         # Test the normalize_data function
+         normalized_df = normalize_data(self.df)
+         # The mean of each column after normalization should be close to 0
+         self.assertAlmostEqual(normalized_df['A'].mean(), 0, delta=0.1)
+         self.assertAlmostEqual(normalized_df['B'].mean(), 0, delta=0.1)
+
+ if __name__ == "__main__":
+     unittest.main()