Update
- app.py +13 -1
- augment.py +1 -0
- configs.py +4 -4
- requirements.txt +0 -0
- train.py +0 -3
app.py
CHANGED
@@ -1,5 +1,9 @@
 import gradio as gr
 import predict as predict
+from googletrans import Translator, constants
+from pprint import pprint
+
+translator = Translator()
 
 
 def upload_file(files):
@@ -29,11 +33,19 @@ def process_file(webcam_filepath, upload_filepath):
     return result
 
 
+def generate_description(request: gr.Request):
+    translation = translator.translate(
+        "SqueezeNet-Based Deep Learning for Early Detection of Movement Disorders via Handwriting Assessment",
+        dest=str(request.request.headers["Accept-Language"].split(",")[0].lower()[0:2]),
+    )
+    return translation.text
+
+
 demo = gr.Interface(
     theme="gradio/soft",
     fn=process_file,
     title="HANDETECT",
-    description=
+    description=generate_description,
     inputs=[
         gr.components.Image(type="filepath", label="Choose Image", source="upload"),
     ],
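The new generate_description callback localizes the interface description: it reads the visitor's Accept-Language header from the gr.Request, keeps the first two letters of the preferred locale, and passes that to googletrans as the destination language. A minimal sketch of the same header-to-language slicing, assuming the synchronous googletrans 3.x API and a made-up header value (the call goes over the network, so results depend on the translation service being reachable):

import googletrans
from googletrans import Translator  # same dependency the commit adds

translator = Translator()

def language_from_accept_header(accept_language: str) -> str:
    # "ms-MY,ms;q=0.9,en;q=0.8" -> "ms-my" -> first two characters -> "ms"
    return accept_language.split(",")[0].lower()[0:2]

header = "ms-MY,ms;q=0.9,en;q=0.8"  # hypothetical header value, for illustration only
dest = language_from_accept_header(header)
result = translator.translate("Hello, world!", dest=dest)
print(result.text)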
augment.py
CHANGED
@@ -46,6 +46,7 @@ for task in ["1"]:
     p.random_brightness(probability=0.8, min_factor=0.5, max_factor=1.5)
     p.random_contrast(probability=0.8, min_factor=0.5, max_factor=1.5)
     p.random_color(probability=0.8, min_factor=0.5, max_factor=1.5)
+    p.rotate_random_90(probability=0.8)
     # Generate 100 - total of original images so that the total number of images in each class is 100
     p.sample(100 - len(p.augmentor_images))
     # Move the folder to data/train/Task 1/augmented
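The only change here is an extra rotate_random_90 step in the Augmentor pipeline; in context, the loop builds one pipeline per class and samples just enough augmented images to bring that class up to 100. A minimal sketch for a single class folder (the path is illustrative, not taken from the repo):

import Augmentor

# One pipeline per class directory; output goes to the "output" subfolder by default
p = Augmentor.Pipeline("data/train/Task 1/SomeClass")  # hypothetical path

p.random_brightness(probability=0.8, min_factor=0.5, max_factor=1.5)
p.random_contrast(probability=0.8, min_factor=0.5, max_factor=1.5)
p.random_color(probability=0.8, min_factor=0.5, max_factor=1.5)
p.rotate_random_90(probability=0.8)  # newly added: rotate by 90, 180, or 270 degrees

# Top the class up to 100 images; augmentor_images lists the originals that were found
p.sample(100 - len(p.augmentor_images))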
configs.py
CHANGED
@@ -14,11 +14,11 @@ from torchvision.models import squeezenet1_0
 
 # Constants
 RANDOM_SEED = 123
-BATCH_SIZE =
+BATCH_SIZE = 16
 NUM_EPOCHS = 40
-LEARNING_RATE =
+LEARNING_RATE = 5.488903014780378e-05
 STEP_SIZE = 10
-GAMMA = 0.
+GAMMA = 0.3
 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 NUM_PRINT = 100
 TASK = 1
@@ -37,7 +37,7 @@ CLASSES = [
     "Huntington Disease",
     "Parkinson Disease",
 ]
-MODEL_SAVE_PATH = "output/checkpoints/model.pth"
+MODEL_SAVE_PATH = r"output/checkpoints/model.pth"
 
 
 class SqueezeNet1_0WithDropout(nn.Module):
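The very specific LEARNING_RATE and the STEP_SIZE / GAMMA pair read like the output of a hyperparameter search feeding an optimizer plus a StepLR schedule, which is the usual pairing for these constant names; the actual wiring lives in train.py and is not part of this diff, so the sketch below is an assumption about how the values are consumed. The switch to a raw string for MODEL_SAVE_PATH is cosmetic, since the path contains no backslash escapes.

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR

# Values mirrored from configs.py
LEARNING_RATE = 5.488903014780378e-05
STEP_SIZE = 10
GAMMA = 0.3

model = nn.Linear(10, 2)  # stand-in; the repo's model is SqueezeNet1_0WithDropout
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)  # optimizer choice is an assumption

# StepLR: every STEP_SIZE epochs, multiply the learning rate by GAMMA
scheduler = StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)

for epoch in range(3):
    optimizer.step()   # placeholder for a real training epoch
    scheduler.step()
    print(epoch, scheduler.get_last_lr())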
requirements.txt
CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
|
train.py
CHANGED
@@ -155,9 +155,6 @@ def main_training_loop():
     )
     break
 
-    # Save the model
-    MODEL_SAVE_PATH = "output/checkpoints/model.pth"
-
     # Ensure the parent directory exists
     os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)
     torch.save(model.state_dict(), MODEL_SAVE_PATH)
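This hunk deletes the local MODEL_SAVE_PATH re-assignment inside the training loop, leaving the path defined once in configs.py. What remains is the standard save sequence; a self-contained sketch, assuming the constant is imported from configs (the diff does not show the import):

import os
import torch
import torch.nn as nn

from configs import MODEL_SAVE_PATH  # single source of truth after this change

model = nn.Linear(10, 2)  # stand-in for the trained model

# Create output/checkpoints/ if it does not exist, then save the weights
os.makedirs(os.path.dirname(MODEL_SAVE_PATH), exist_ok=True)
torch.save(model.state_dict(), MODEL_SAVE_PATH)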